| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82-54.1k chars) | int64 (0-699) | string (111-35.6k chars) | int64 (0-699) | int64 (0-1) |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
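The `_LazyModule` indirection defers importing heavy submodules until an attribute is first accessed. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); the structure below is illustrative, not the transformers implementation:

    import importlib

    _import_structure = {"configuration_vit_msn": ["ViTMSNConfig"]}

    def __getattr__(name):
        # Resolve which submodule exports `name`, importing it on first use.
        for submodule, exported in _import_structure.items():
            if name in exported:
                module = importlib.import_module(f".{submodule}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")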
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
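# Note (illustration, not part of the original file): `safemembers` guards against
# path-traversal entries in untrusted tarballs (CVE-2007-4559). A hypothetical
# malicious member is filtered out rather than extracted:
#
#     info = tarfile.TarInfo(name="../../etc/passwd")
#     list(TarExtractor.safemembers([info], "/tmp/out"))  # logs an error, yields nothing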
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
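A quick usage sketch for the API above; the archive path here is hypothetical:

    archive_path = "data/train.tar.gz"
    fmt = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip", or None if unrecognized
    if fmt:
        Extractor.extract(archive_path, "extracted/train", extractor_format=fmt)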
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def bead_sort(sequence: list) -> list:
    """Bead sort (gravity sort) for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
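A minimal sanity check against the built-in sort (the random inputs are arbitrary; note that `bead_sort` mutates its argument, hence the copy):

    import random

    data = [random.randint(0, 100) for _ in range(20)]
    assert bead_sort(list(data)) == sorted(data)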
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
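The tool frames zero-shot classification as natural language inference: each candidate label becomes the hypothesis "This example is {label}", and the label whose (text, hypothesis) pair scores highest on the entailment logit wins. A standalone sketch of the same trick with plain transformers, bypassing the PipelineTool wrapper (the example text and labels are made up; for this checkpoint, logit column 2 is the entailment class):

    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
    model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")

    text = "The GPU ran out of memory during training."
    labels = ["sports", "software", "cooking"]
    batch = tokenizer(
        [text] * len(labels),
        [f"This example is {label}" for label in labels],
        return_tensors="pt",
        padding=True,
    )
    with torch.no_grad():
        logits = model(**batch).logits
    print(labels[int(torch.argmax(logits[:, 2]))])  # most entailed label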
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
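A minimal sketch of driving this scheduler by hand, assuming the diffusers package context for the relative imports above; the zero "score" is a placeholder for a trained score network:

    import torch

    scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
    scheduler.set_timesteps(num_inference_steps=10)
    x = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = torch.zeros_like(x)  # stand-in for model(x, t)
        x, x_mean = scheduler.step_pred(score, x, t)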
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
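This computes C(n, r) with Pascal's rule over a single row, using O(r) space. A quick cross-check against the standard library (Python 3.8+):

    import math

    assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252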
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
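Quick checks (the second string is missing most letters of the alphabet):

    assert is_pangram_fastest("The quick brown fox jumps over the lazy dog")
    assert not is_pangram_fastest("Hello world")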
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
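# Illustration (not part of the original script): `alpha_sigma_to_t` inverts the
# cosine schedule alpha = cos(t * pi / 2), sigma = sin(t * pi / 2), so for t in [0, 1]:
#
#     t = torch.tensor(0.3)
#     alpha, sigma = torch.cos(t * math.pi / 2), torch.sin(t * math.pi / 2)
#     assert torch.isclose(alpha_sigma_to_t(alpha, sigma), t)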
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
def encrypt(input_string: str, key: int) -> str:
    temp_grid = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
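A quick round-trip and brute-force demonstration (key and plaintext are arbitrary):

    ciphertext = encrypt("Hello, World!", 4)
    assert decrypt(ciphertext, 4) == "Hello, World!"
    assert "Hello, World!" in bruteforce(ciphertext).values()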
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    # Sliding window: maximum sum of k consecutive elements.
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
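A small deterministic check of the sliding window:

    assert max_sum_in_array([1, 2, 3, 4, 5], k=2) == 9  # the window [4, 5]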
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
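# Illustration (not part of the test file): tf_top_k_top_p_filtering keeps the
# top-k logits plus the smallest set whose softmax mass reaches top_p, masking
# everything else to -inf. A rough NumPy sketch of the top-p half for one row:
#
#     order = np.argsort(logits)[::-1]
#     ex = np.exp(logits[order] - logits[order].max())
#     cutoff = np.searchsorted(np.cumsum(ex / ex.sum()), top_p) + 1
#     masked = np.full_like(logits, -np.inf)
#     masked[order[:cutoff]] = logits[order[:cutoff]]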
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
a_ = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import unittest

from transformers import DonutProcessor


PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
for v in tree.values():
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE__ ) )
elif isinstance(SCREAMING_SNAKE_CASE__ ,(list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE__ ) )
elif isinstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    # Runs `layer` over `inputs` in chunks along the flattened batch dimensions,
    # writing each chunk's output into a pre-allocated buffer.
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : int = 512 , ) -> Dict:
_UpperCamelCase : Dict = max_chunk_size
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : List[Any] = None
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Callable , __a : tuple , __a : int ) -> int:
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_UpperCamelCase : str = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
_UpperCamelCase : Any = [c for c in candidates if c > min_chunk_size]
_UpperCamelCase : str = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a : int ) -> bool:
try:
with torch.no_grad():
fn(*_UpperCamelCase , chunk_size=_UpperCamelCase )
return True
except RuntimeError:
return False
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Optional[Any] = len(_UpperCamelCase ) - 1
while i > min_viable_chunk_size_index:
_UpperCamelCase : Optional[int] = test_chunk_size(candidates[i] )
if not viable:
_UpperCamelCase : Optional[Any] = (min_viable_chunk_size_index + i) // 2
else:
_UpperCamelCase : str = i
_UpperCamelCase : Dict = (i + len(_UpperCamelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Iterable , __a : Iterable ) -> bool:
_UpperCamelCase : List[str] = True
for aa, aa in zip(_UpperCamelCase , _UpperCamelCase ):
assert type(_UpperCamelCase ) == type(_UpperCamelCase )
if isinstance(_UpperCamelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(_UpperCamelCase , _UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
                _UpperCamelCase : Any = [v for _, v in sorted(aa.items() , key=lambda __a : __a[0] )]
                _UpperCamelCase : List[Any] = [v for _, v in sorted(aa.items() , key=lambda __a : __a[0] )]
consistent &= self._compare_arg_caches(_UpperCamelCase , _UpperCamelCase )
else:
consistent &= aa == aa
return consistent
def __SCREAMING_SNAKE_CASE ( self : str , __a : Callable , __a : tuple , __a : int , ) -> int:
_UpperCamelCase : Any = True
_UpperCamelCase : Optional[int] = tree_map(lambda __a : a.shape if isinstance(_UpperCamelCase , torch.Tensor ) else a , _UpperCamelCase , _UpperCamelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_UpperCamelCase )
_UpperCamelCase : int = self._compare_arg_caches(self.cached_arg_data , _UpperCamelCase )
else:
# Otherwise, we can reuse the precomputed value
_UpperCamelCase : Optional[Any] = False
if not consistent:
_UpperCamelCase : Optional[Any] = self._determine_favorable_chunk_size(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
_UpperCamelCase : Any = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
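# --- Editor's usage sketch (not part of the dataset row above) -------------
# The utilities above apply a layer to a large batch in fixed-size slices to
# bound peak memory. A minimal, self-contained illustration of that idea;
# `toy_layer` and `apply_in_chunks` are hypothetical names.
import torch

def toy_layer(x: torch.Tensor) -> torch.Tensor:
    return x * 2 + 1

def apply_in_chunks(x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    out = x.new_empty(x.shape)  # pre-allocate the output, as the chunked helper above does
    for start in range(0, x.shape[0], chunk_size):
        out[start : start + chunk_size] = toy_layer(x[start : start + chunk_size])
    return out

assert torch.equal(apply_in_chunks(torch.arange(10.0), 4), torch.arange(10.0) * 2 + 1)
# ---------------------------------------------------------------------------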
| 624 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not nums:
raise ValueError('''List is empty''' )
return sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 0 |
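# --- Editor's usage sketch (not part of the dataset row above) -------------
# The helper above is a plain arithmetic mean that raises ValueError on an
# empty list; the equivalent computation for [1, 2, 3]:
assert sum([1, 2, 3]) / len([1, 2, 3]) == 2.0
# ---------------------------------------------------------------------------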
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 / sqrt(2 ) ):
"""simple docstring"""
_UpperCamelCase =tau * frequency / samplerate
_UpperCamelCase =sin(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =cos(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =_sin / (2 * q_factor)
_UpperCamelCase =(1 - _cos) / 2
_UpperCamelCase =1 - _cos
_UpperCamelCase =1 + alpha
_UpperCamelCase =-2 * _cos
_UpperCamelCase =1 - alpha
_UpperCamelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 / sqrt(2 ) ):
"""simple docstring"""
_UpperCamelCase =tau * frequency / samplerate
_UpperCamelCase =sin(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =cos(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =_sin / (2 * q_factor)
_UpperCamelCase =(1 + _cos) / 2
_UpperCamelCase =-1 - _cos
_UpperCamelCase =1 + alpha
_UpperCamelCase =-2 * _cos
_UpperCamelCase =1 - alpha
_UpperCamelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 / sqrt(2 ) ):
"""simple docstring"""
_UpperCamelCase =tau * frequency / samplerate
_UpperCamelCase =sin(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =cos(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =_sin / (2 * q_factor)
_UpperCamelCase =_sin / 2
_UpperCamelCase =0
_UpperCamelCase =-ba
_UpperCamelCase =1 + alpha
_UpperCamelCase =-2 * _cos
_UpperCamelCase =1 - alpha
_UpperCamelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 / sqrt(2 ) ):
"""simple docstring"""
_UpperCamelCase =tau * frequency / samplerate
_UpperCamelCase =sin(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =cos(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =_sin / (2 * q_factor)
_UpperCamelCase =1 - alpha
_UpperCamelCase =-2 * _cos
_UpperCamelCase =1 + alpha
_UpperCamelCase =IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 / sqrt(2 ) , ):
"""simple docstring"""
_UpperCamelCase =tau * frequency / samplerate
_UpperCamelCase =sin(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =cos(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =_sin / (2 * q_factor)
_UpperCamelCase =10 ** (gain_db / 40)
_UpperCamelCase =1 + alpha * big_a
_UpperCamelCase =-2 * _cos
_UpperCamelCase =1 - alpha * big_a
_UpperCamelCase =1 + alpha / big_a
_UpperCamelCase =-2 * _cos
_UpperCamelCase =1 - alpha / big_a
_UpperCamelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 / sqrt(2 ) , ):
"""simple docstring"""
_UpperCamelCase =tau * frequency / samplerate
_UpperCamelCase =sin(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =cos(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =_sin / (2 * q_factor)
_UpperCamelCase =10 ** (gain_db / 40)
_UpperCamelCase =(big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase =(big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase =(big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase =(big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase =2 * sqrt(SCREAMING_SNAKE_CASE__ ) * alpha
_UpperCamelCase =big_a * (pmc + aaa)
_UpperCamelCase =2 * big_a * mpc
_UpperCamelCase =big_a * (pmc - aaa)
_UpperCamelCase =ppmc + aaa
_UpperCamelCase =-2 * pmpc
_UpperCamelCase =ppmc - aaa
_UpperCamelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 / sqrt(2 ) , ):
"""simple docstring"""
_UpperCamelCase =tau * frequency / samplerate
_UpperCamelCase =sin(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =cos(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase =_sin / (2 * q_factor)
_UpperCamelCase =10 ** (gain_db / 40)
_UpperCamelCase =(big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase =(big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase =(big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase =(big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase =2 * sqrt(SCREAMING_SNAKE_CASE__ ) * alpha
_UpperCamelCase =big_a * (ppmc + aaa)
_UpperCamelCase =-2 * big_a * pmpc
_UpperCamelCase =big_a * (ppmc - aaa)
_UpperCamelCase =pmc + aaa
_UpperCamelCase =2 * mpc
_UpperCamelCase =pmc - aaa
_UpperCamelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
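# --- Editor's usage sketch (not part of the dataset row above) -------------
# The factories above follow the Audio EQ Cookbook biquad formulas. This
# standalone check recomputes the low-pass coefficients for a 440 Hz cutoff
# at a 48 kHz sample rate with a Butterworth Q of 1/sqrt(2).
from math import cos, sin, sqrt, tau

w0 = tau * 440 / 48_000
alpha = sin(w0) / (2 * (1 / sqrt(2)))
b0 = b2 = (1 - cos(w0)) / 2   # feed-forward coefficients
b1 = 1 - cos(w0)
a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha  # feedback coefficients
print([b0 / a0, b1 / a0, b2 / a0])  # normalized numerator
# ---------------------------------------------------------------------------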
| 404 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : List[str] ) ->str:
snake_case_ = inspect.getfile(accelerate.test_utils )
snake_case_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
snake_case_ = test_metrics
@require_cpu
def snake_case__( self : str ) ->int:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case__( self : Union[str, Any] ) ->Any:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case__( self : List[Any] ) ->Tuple:
self.test_metrics.main()
@require_multi_gpu
def snake_case__( self : Any ) ->Union[str, Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) | 39 | 0 |
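# --- Editor's usage sketch (not part of the dataset row above) -------------
# debug_launcher, used by the CPU tests above, runs a callable under an
# emulated multi-process setup without GPUs. The toy function is hypothetical.
#
#   from accelerate import debug_launcher
#
#   def _toy():
#       print("hello from an emulated process")
#
#   debug_launcher(_toy, num_processes=2)
# ---------------------------------------------------------------------------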
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_A = get_tests_dir('''fixtures''')
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = mock.Mock()
lowerCAmelCase_ = 500
lowerCAmelCase_ = {}
lowerCAmelCase_ = HTTPError
lowerCAmelCase_ = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''', return_value=_UpperCamelCase ) as mock_head:
lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with self.assertRaises(_UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''', subfolder='''feature_extractor''' )
self.assertIsNotNone(_UpperCamelCase )
@is_staging_test
class A ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
"""simple docstring"""
lowerCAmelCase_ = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ViTImageProcessor.from_pretrained(_UpperCamelCase )
image_processor.push_to_hub('''test-image-processor''', use_auth_token=self._token )
lowerCAmelCase_ = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_UpperCamelCase, getattr(_UpperCamelCase, _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_UpperCamelCase, repo_id='''test-image-processor''', push_to_hub=_UpperCamelCase, use_auth_token=self._token )
lowerCAmelCase_ = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_UpperCamelCase, getattr(_UpperCamelCase, _UpperCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ViTImageProcessor.from_pretrained(_UpperCamelCase )
image_processor.push_to_hub('''valid_org/test-image-processor''', use_auth_token=self._token )
lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_UpperCamelCase, getattr(_UpperCamelCase, _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_UpperCamelCase, repo_id='''valid_org/test-image-processor-org''', push_to_hub=_UpperCamelCase, use_auth_token=self._token )
lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_UpperCamelCase, getattr(_UpperCamelCase, _UpperCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
lowerCAmelCase_ = CustomImageProcessor.from_pretrained(_UpperCamelCase )
image_processor.push_to_hub('''test-dynamic-image-processor''', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''}, )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(
f"{USER}/test-dynamic-image-processor", trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, '''CustomImageProcessor''' )
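# --- Editor's usage sketch (not part of the dataset row above) -------------
# Pushing an image processor to the Hub, as the staging tests above do.
# Requires a valid token; the repo name below is hypothetical.
#
#   from transformers import ViTImageProcessor
#
#   processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
#   processor.push_to_hub("my-test-image-processor", use_auth_token="hf_...")
# ---------------------------------------------------------------------------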
| 431 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "informer"
SCREAMING_SNAKE_CASE : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__(
        self : Dict ,
        _UpperCamelCase : Optional[int] = None ,
        _UpperCamelCase : Optional[int] = None ,
        _UpperCamelCase : str = "student_t" ,
        _UpperCamelCase : str = "nll" ,
        _UpperCamelCase : int = 1 ,
        _UpperCamelCase : List[int] = None ,
        _UpperCamelCase : Optional[Union[str, bool]] = "mean" ,
        _UpperCamelCase : int = 0 ,
        _UpperCamelCase : int = 0 ,
        _UpperCamelCase : int = 0 ,
        _UpperCamelCase : int = 0 ,
        _UpperCamelCase : Optional[List[int]] = None ,
        _UpperCamelCase : Optional[List[int]] = None ,
        _UpperCamelCase : int = 6_4 ,
        _UpperCamelCase : int = 3_2 ,
        _UpperCamelCase : int = 3_2 ,
        _UpperCamelCase : int = 2 ,
        _UpperCamelCase : int = 2 ,
        _UpperCamelCase : int = 2 ,
        _UpperCamelCase : int = 2 ,
        _UpperCamelCase : bool = True ,
        _UpperCamelCase : str = "gelu" ,
        _UpperCamelCase : float = 0.05 ,
        _UpperCamelCase : float = 0.1 ,
        _UpperCamelCase : float = 0.1 ,
        _UpperCamelCase : float = 0.1 ,
        _UpperCamelCase : float = 0.1 ,
        _UpperCamelCase : int = 1_0_0 ,
        _UpperCamelCase : float = 0.02 ,
        _UpperCamelCase : Dict=True ,
        _UpperCamelCase : str = "prob" ,
        _UpperCamelCase : int = 5 ,
        _UpperCamelCase : bool = True ,
        **_UpperCamelCase : Optional[Any] ,
    ) ->Optional[int]:
# time series specific configuration
snake_case_ = prediction_length
snake_case_ = context_length or prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = cardinality
else:
snake_case_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = embedding_dimension
else:
snake_case_ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Informer
snake_case_ = attention_type
snake_case_ = sampling_factor
snake_case_ = distil
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def snake_case__( self : Optional[Any] ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 39 | 0 |
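# --- Editor's usage sketch (not part of the dataset row above) -------------
# Minimal instantiation of the Informer config above. Keyword names are
# inferred from the attributes set in __init__ and are assumptions.
#
#   from transformers import InformerConfig
#
#   config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
#   print(config.d_model, config.attention_type)
# ---------------------------------------------------------------------------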
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Dict="pt" ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = {'''add_prefix_space''': True} if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not line.startswith(''' ''' ) else {}
UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' if pad_to_max_length else None , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = input_ids.ne(SCREAMING_SNAKE_CASE__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowerCAmelCase__ ( __A ):
'''simple docstring'''
def __init__( self : Optional[int] , a__ : Dict , a__ : int , a__ : Tuple , a__ : int , a__ : Tuple="train" , a__ : Dict=None , a__ : Optional[int]=None , a__ : List[str]=None , a__ : int="" , ):
super().__init__()
UpperCAmelCase = Path(_UpperCamelCase ).joinpath(type_path + '''.source''' )
UpperCAmelCase = Path(_UpperCamelCase ).joinpath(type_path + '''.target''' )
UpperCAmelCase = self.get_char_lens(self.src_file )
UpperCAmelCase = max_source_length
UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
UpperCAmelCase = tokenizer
UpperCAmelCase = prefix
if n_obs is not None:
UpperCAmelCase = self.src_lens[:n_obs]
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
def __len__( self : List[str] ):
return len(self.src_lens )
def __getitem__( self : Tuple , a__ : Tuple ):
UpperCAmelCase = index + 1 # linecache starts at 1
UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , _UpperCamelCase ).rstrip('''\n''' )
UpperCAmelCase = linecache.getline(str(self.tgt_file ) , _UpperCamelCase ).rstrip('''\n''' )
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _UpperCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCamelCase ) else self.tokenizer
)
UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCamelCase ) else self.tokenizer
UpperCAmelCase = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_source_length , '''right''' )
UpperCAmelCase = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_target_length , '''right''' )
UpperCAmelCase = source_inputs['''input_ids'''].squeeze()
UpperCAmelCase = target_inputs['''input_ids'''].squeeze()
UpperCAmelCase = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __snake_case ( a__ : Tuple ):
return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()]
def __snake_case ( self : Tuple , a__ : Optional[Any] ):
UpperCAmelCase = torch.stack([x['''input_ids'''] for x in batch] )
UpperCAmelCase = torch.stack([x['''attention_mask'''] for x in batch] )
UpperCAmelCase = torch.stack([x['''decoder_input_ids'''] for x in batch] )
UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase )
else self.tokenizer.pad_token_id
)
UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase )
else self.tokenizer.pad_token_id
)
UpperCAmelCase = trim_batch(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase, UpperCAmelCase = trim_batch(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase )
UpperCAmelCase = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
a__ : List[Any] = getLogger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> int:
"""simple docstring"""
return list(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE__ ) )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = get_git_info()
save_json(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''git_log.json''' ) )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any=4 , **SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def __snake_case ( ) -> Dict:
"""simple docstring"""
UpperCAmelCase = git.Repo(search_parent_directories=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = {
'''repo_id''': str(SCREAMING_SNAKE_CASE__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
"""simple docstring"""
return list(map(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> Any:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as f:
return pickle.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
def remove_articles(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , SCREAMING_SNAKE_CASE__ )
def white_space_fix(SCREAMING_SNAKE_CASE_ : Dict ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE_ : Tuple ):
UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE_ : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE__ ) ) ) )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = normalize_answer(SCREAMING_SNAKE_CASE__ ).split()
UpperCAmelCase = normalize_answer(SCREAMING_SNAKE_CASE__ ).split()
UpperCAmelCase = Counter(SCREAMING_SNAKE_CASE__ ) & Counter(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
UpperCAmelCase = 1.0 * num_same / len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = 1.0 * num_same / len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
"""simple docstring"""
return normalize_answer(SCREAMING_SNAKE_CASE__ ) == normalize_answer(SCREAMING_SNAKE_CASE__ )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
"""simple docstring"""
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = 0
for hypo, pred in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
em += exact_match_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
em /= len(SCREAMING_SNAKE_CASE__ )
return {"em": em}
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict:
"""simple docstring"""
return model_prefix.startswith('''rag''' )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
"""simple docstring"""
UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
UpperCAmelCase = '''dropout_rate'''
for p in extra_params:
if getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not hasattr(SCREAMING_SNAKE_CASE__ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(SCREAMING_SNAKE_CASE__ ) )
delattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
UpperCAmelCase = p if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else equivalent_param[p]
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
delattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return hparams, config
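# --- Editor's usage sketch (not part of the dataset row above) -------------
# The token-level F1 above is 2PR/(P+R) over bag-of-words overlap between a
# prediction and a gold answer. A standalone re-computation (_toy_f1 is a
# hypothetical name):
from collections import Counter as _Counter

def _toy_f1(pred: str, gold: str) -> float:
    p, g = pred.split(), gold.split()
    same = sum((_Counter(p) & _Counter(g)).values())
    if same == 0:
        return 0.0
    precision, recall = same / len(p), same / len(g)
    return 2 * precision * recall / (precision + recall)

assert abs(_toy_f1("a b c", "a b d") - 2 / 3) < 1e-9
# ---------------------------------------------------------------------------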
| 51 |
import cmath
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = math.radians(SCREAMING_SNAKE_CASE__ )
snake_case_ = math.radians(SCREAMING_SNAKE_CASE__ )
# Convert voltage and current to rectangular form
snake_case_ = cmath.rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = cmath.rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 0 |
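# --- Editor's usage sketch (not part of the dataset row above) -------------
# Apparent power, as computed above, is the complex product of the phasors:
# 230 V at 0 degrees and 5 A at -30 degrees give |S| = 230 * 5 = 1150 VA.
import cmath as _cmath
import math as _math

_v = _cmath.rect(230, _math.radians(0))
_i = _cmath.rect(5, _math.radians(-30))
assert abs(abs(_v * _i) - 1150) < 1e-9
# ---------------------------------------------------------------------------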
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE__ : str = logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( __A ):
__SCREAMING_SNAKE_CASE = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
__SCREAMING_SNAKE_CASE = field(default=__A , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
__SCREAMING_SNAKE_CASE = field(
default=__A , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
__SCREAMING_SNAKE_CASE = field(default=__A , metadata={'''help''': '''whether to use adafactor'''} )
__SCREAMING_SNAKE_CASE = field(
default=__A , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
__SCREAMING_SNAKE_CASE = field(
default=__A , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
__SCREAMING_SNAKE_CASE = field(default=__A , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
__SCREAMING_SNAKE_CASE = field(
default=__A , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
__SCREAMING_SNAKE_CASE = field(
default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
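# --- Editor's usage sketch (not part of the dataset row above) -------------
# The dataclass above extends TrainingArguments with seq2seq-specific knobs.
# The class and field names below are inferred from the metadata help strings
# and are assumptions:
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out", sortish_sampler=True, predict_with_generate=True
#   )
# ---------------------------------------------------------------------------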
| 643 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
'''simple docstring'''
    def __init__(
        self : Optional[int] ,
        _UpperCamelCase : Tuple ,
        _UpperCamelCase : Optional[int]=1_3 ,
        _UpperCamelCase : str=7 ,
        _UpperCamelCase : int=True ,
        _UpperCamelCase : Dict=True ,
        _UpperCamelCase : int=False ,
        _UpperCamelCase : Dict=True ,
        _UpperCamelCase : Optional[int]=9_9 ,
        _UpperCamelCase : str=3_2 ,
        _UpperCamelCase : str=5 ,
        _UpperCamelCase : str=4 ,
        _UpperCamelCase : int=3_7 ,
        _UpperCamelCase : int="gelu" ,
        _UpperCamelCase : List[str]=0.1 ,
        _UpperCamelCase : Dict=0.1 ,
        _UpperCamelCase : str=5_1_2 ,
        _UpperCamelCase : Optional[int]=1_6 ,
        _UpperCamelCase : List[str]=2 ,
        _UpperCamelCase : Any=0.02 ,
        _UpperCamelCase : List[str]=3 ,
        _UpperCamelCase : List[str]=4 ,
        _UpperCamelCase : str=None ,
    ) ->Dict:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def snake_case__( self : str ) ->List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : List[str] ) ->Tuple:
return BioGptConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=_UpperCamelCase ,
            initializer_range=self.initializer_range , )
def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) ->Dict:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , ) ->Optional[int]:
snake_case_ = BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , *_UpperCamelCase : List[Any] ) ->Union[str, Any]:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
snake_case_ = self.seq_length // 2
snake_case_ = 0
# first forward pass
snake_case_, snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case_ = ids_tensor((1,) , _UpperCamelCase ).item() + 1
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case_ = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , *_UpperCamelCase : List[Any] ) ->int:
snake_case_ = BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
snake_case_, snake_case_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=False ) ->Dict:
snake_case_ = BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , *_UpperCamelCase : Dict ) ->Dict:
snake_case_ = BioGptModel(_UpperCamelCase )
snake_case_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , *_UpperCamelCase : List[str] ) ->int:
snake_case_ = self.num_labels
snake_case_ = BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = BioGptModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->Optional[Any]:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = '''left'''
# Define PAD Token = EOS Token = 50256
snake_case_ = tokenizer.eos_token
snake_case_ = model.config.eos_token_id
# use different length sentences to test batching
snake_case_ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''pt''' , padding=_UpperCamelCase )
snake_case_ = inputs['''input_ids'''].to(_UpperCamelCase )
snake_case_ = model.generate(
input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(_UpperCamelCase ) , )
snake_case_ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase )
snake_case_ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
snake_case_ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__( self : Optional[int] ) ->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = '''multi_label_classification'''
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : int ) ->Any:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
snake_case_ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 4_2_3_8_4
snake_case_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
snake_case_ = model.generate(
**_UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCamelCase , )
snake_case_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
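# --- Editor's usage sketch (not part of the dataset row above) -------------
# Generating with BioGPT as the slow tests above do (downloads weights when
# uncommented):
#
#   from transformers import BioGptForCausalLM, BioGptTokenizer
#
#   tok = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
#   ids = tok("COVID-19 is", return_tensors="pt").input_ids
#   print(tok.decode(model.generate(ids, max_length=20)[0], skip_special_tokens=True))
# ---------------------------------------------------------------------------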
def __UpperCamelCase (lowerCAmelCase : List[str] ) -> Dict:
A = len(SCREAMING_SNAKE_CASE__ )
while cur > 1:
# Find the maximum number in arr
A = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
A = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE__ )]
# Reverse whole list
A = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE__ )]
cur -= 1
return arr
if __name__ == "__main__":
_UpperCAmelCase = input("Enter numbers separated by a comma:\n").strip()
_UpperCAmelCase = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
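# --- Editor's usage sketch (not part of the dataset row above) -------------
# Pancake sort orders a list using only prefix reversals; a self-contained
# re-implementation of the loop above (_pancake_demo is a hypothetical name):
def _pancake_demo(arr):
    for cur in range(len(arr), 1, -1):
        mi = arr.index(max(arr[:cur]))
        arr = arr[mi::-1] + arr[mi + 1 :]     # flip the current maximum to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]  # flip it into its final position
    return arr

assert _pancake_demo([3, 1, 2, 5, 4]) == [1, 2, 3, 4, 5]
# ---------------------------------------------------------------------------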
| 699 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
snake_case_ = (boundary[1] - boundary[0]) / steps
snake_case_ = boundary[0]
snake_case_ = boundary[1]
snake_case_ = make_points(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = 0.0
y += (h / 2.0) * f(SCREAMING_SNAKE_CASE__ )
for i in x_i:
# print(i)
y += h * f(SCREAMING_SNAKE_CASE__ )
y += (h / 2.0) * f(SCREAMING_SNAKE_CASE__ )
return y
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = a + h
while x < (b - h):
yield x
snake_case_ = x + h
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): # enter your function here
snake_case_ = (x - 0) * (x - 0)
return y
def __SCREAMING_SNAKE_CASE ():
snake_case_ = 0.0 # Lower bound of integration
snake_case_ = 1.0 # Upper bound of integration
snake_case_ = 10.0 # define number of steps or resolution
snake_case_ = [a, b] # define boundary of integration
snake_case_ = method_a(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(F'''y = {y}''' )
if __name__ == "__main__":
main() | 39 | 0 |
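# --- Editor's usage sketch (not part of the dataset row above) -------------
# The extended trapezoidal rule above approximates int(f) by
# h * (f(a)/2 + f(a+h) + ... + f(b-h) + f(b)/2). For f(x) = x**2 on [0, 1]
# with 10 steps it yields 0.335 (exact value 1/3, error h**2/6).
def _trap_demo(f, a, b, steps):
    h = (b - a) / steps
    interior = sum(f(a + i * h) for i in range(1, steps))
    return h * ((f(a) + f(b)) / 2 + interior)

assert abs(_trap_demo(lambda x: x * x, 0.0, 1.0, 10) - 0.335) < 1e-9
# ---------------------------------------------------------------------------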
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase : List[Any] = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowercase ( __A : int ) -> Union[str, Any]:
'''simple docstring'''
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def lowercase ( __A : Any ) -> Any:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(SCREAMING_SNAKE_CASE__ )
def lowercase ( __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
snake_case : Optional[Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(SCREAMING_SNAKE_CASE__ , id=SCREAMING_SNAKE_CASE__ )
def lowercase ( __A : List[str] , __A : Tuple ) -> List[Any]:
'''simple docstring'''
if exitstatus == 5:
snake_case : Optional[int] = 0
# Doctest custom flag to ignore output.
__lowercase : int = doctest.register_optionflag('''IGNORE_RESULT''')
__lowercase : Optional[Any] = doctest.OutputChecker
class _A ( __A ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowercase : Dict = CustomOutputChecker
__lowercase : Union[str, Any] = HfDoctestModule
__lowercase : List[Any] = HfDocTestParser
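# --- Editor's usage sketch (not part of the dataset row above) -------------
# The custom IGNORE_RESULT doctest flag registered above lets a doctest run a
# statement without comparing its output:
#
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
# ---------------------------------------------------------------------------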
| 36 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
lowerCAmelCase_ = Path(__file__).parent / '''model_card_template.md'''
lowerCAmelCase_ = uuid4().hex
lowerCAmelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None ):
snake_case_ = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + user_agent
return ua
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
if token is None:
snake_case_ = HfFolder.get_token()
if organization is None:
snake_case_ = whoami(SCREAMING_SNAKE_CASE__ )['''name''']
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(SCREAMING_SNAKE_CASE__ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
snake_case_ = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , '''hub_token''' ) else None
snake_case_ = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
snake_case_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='''en''' ,
            license='''apache-2.0''' ,
            library_name='''diffusers''' ,
            tags=[] ,
            datasets=args.dataset_name ,
            metrics=[] , ) ,
        template_path=SCREAMING_SNAKE_CASE__ ,
        model_name=SCREAMING_SNAKE_CASE__ ,
        repo_name=SCREAMING_SNAKE_CASE__ ,
        dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , '''dataset_name''' ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''gradient_accumulation_steps''' ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta1''' ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta2''' ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_weight_decay''' ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_epsilon''' ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_scheduler''' ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_warmup_steps''' ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_inv_gamma''' ) else None ,
        ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_power''' ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_max_decay''' ) else None ,
        mixed_precision=args.mixed_precision , )
snake_case_ = os.path.join(args.output_dir , '''README.md''' )
model_card.save(SCREAMING_SNAKE_CASE__ )
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R'''snapshots/([^/]+)/''', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
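# Small sanity check for the snapshot-path parsing above. The path below is a
# made-up example of the layout hf_hub_download produces inside the cache.
def _demo_extract_commit_hash() -> None:
    resolved = '''/home/user/.cache/huggingface/hub/models--org--name/snapshots/0123456789abcdef0123456789abcdef01234567/unet/config.json'''
    # the 40-char hex directory name under snapshots/ is the commit hash
    assert extract_commit_hash(resolved) == '''0123456789abcdef0123456789abcdef01234567'''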
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('''.''')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits)
    return weights_name
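# Quick check of the variant naming scheme (runs without any model files):
def _demo_add_variant() -> None:
    # the variant label is spliced in before the file extension
    assert _add_variant('''diffusion_pytorch_model.bin''', '''fp16''') == '''diffusion_pytorch_model.fp16.bin'''
    assert _add_variant('''diffusion_pytorch_model.bin''') == '''diffusion_pytorch_model.bin'''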
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('''0.20.0''')
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.''',
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.''',
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
                '''listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a '''
                '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
                '''login`.''' )
        except RevisionNotFoundError:
            raise EnvironmentError(
                F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
                '''this model name. Check the model page at '''
                F''''https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.''' )
        except EntryNotFoundError:
            raise EnvironmentError(
                F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
        except HTTPError as err:
            raise EnvironmentError(
                F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
        except ValueError:
            raise EnvironmentError(
                F'''We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it'''
                F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
                F''' directory containing a file named {weights_name} or'''
                ''' \nCheckout your internet connection or see how to run the library in'''
                ''' offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.''' )
        except EnvironmentError:
            raise EnvironmentError(
                F'''Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from '''
                ''''https://huggingface.co/models', make sure you don't have a local directory with the same name. '''
                F'''Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory '''
                F'''containing a file named {weights_name}''' )
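# Usage sketch for the resolver above (illustrative; the repo id and keyword
# values are example inputs, and the call downloads from the Hub when online):
#
#   model_file = _get_model_file(
#       "runwayml/stable-diffusion-v1-5",
#       weights_name=WEIGHTS_NAME,
#       subfolder="unet",
#       cache_dir=None,
#       force_download=False,
#       proxies=None,
#       resume_download=False,
#       local_files_only=False,
#       use_auth_token=None,
#       user_agent=http_user_agent(),
#       revision=None,
#   )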
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
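# Illustrative check of the special-token layout built above (token ids are
# made up; a real tokenizer supplies its own [CLS]/[SEP] ids):
def _demo_special_token_layout() -> None:
    cls, sep = [101], [102]  # hypothetical [CLS] / [SEP] ids
    token_ids_0, token_ids_1 = [7, 8], [9]
    # [CLS] A [SEP] -> segment 0 (special tokens included), B [SEP] -> segment 1
    segment_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    assert segment_ids == [0, 0, 0, 0, 1, 1]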
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('''Initializing the config with a `BiT` backbone.''')
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('''Readout type must be 'project' when using `DPT-hybrid` mode.''')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of ['ignore', 'add', 'project']''')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
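# Usage sketch (default values; `backbone_config` only matters in hybrid mode):
#
#   config = DPTConfig()                 # plain ViT backbone
#   hybrid = DPTConfig(is_hybrid=True)   # a BiT backbone config is created automatically
#   hybrid.to_dict()["backbone_config"]  # nested config is serialized as a plain dict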
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """embed_dim"""))
        self.parent.assertTrue(hasattr(config, """num_heads"""))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="""Cvt does not output attentions""")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="""Cvt does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""Cvt does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = '''pytorch_model.bin'''
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort('''probability''', reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(['''label''', '''probability'''])
    dataset = dataset.rename_column('''prediction''', '''label''')
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, F'''train_pseudo.{args.data_file_extension}''')
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
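# A minimal, self-contained illustration of the confidence filter above, on a
# toy in-memory dataset (the column names mirror the real inference output):
def _demo_confidence_filter() -> None:
    toy = datasets.Dataset.from_dict({"prediction": [0, 1, 1], "probability": [0.2, 0.9, 0.6]})
    kept = toy.filter(lambda example: example["probability"] > 0.5)
    # only the rows the model was confident about survive
    assert kept["probability"] == [0.9, 0.6]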
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files['''train'''] = args.train_file
    data_files['''infer'''] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files['''eval'''] = args.eval_file

    for key in data_files:
        extension = data_files[key].split('''.''')[-1]
        assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''

    assert (
        args.eval_metric in datasets.list_metrics()
    ), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info('''Creating the initial data directory for self-training...''')
    data_dir_format = F'''{args.output_dir}/self-train_iter-{{}}'''.format
    data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, '''stage-1''')
        arguments_dict = {
            '''accelerator''': accelerator,
            '''model_name_or_path''': args.model_name_or_path,
            '''cache_dir''': args.cache_dir,
            '''do_train''': True,
            '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
            '''do_eval''': True if args.eval_file is not None else False,
            '''eval_file''': data_files['''eval'''],
            '''do_predict''': True,
            '''infer_file''': data_files['''infer'''],
            '''task_name''': args.task_name,
            '''label_list''': args.label_list,
            '''output_dir''': current_output_dir,
            '''eval_metric''': args.eval_metric,
            '''evaluation_strategy''': args.evaluation_strategy,
            '''early_stopping_patience''': args.early_stopping_patience,
            '''early_stopping_threshold''': args.early_stopping_threshold,
            '''seed''': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, '''best-checkpoint''', MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''', model_bin_file_path, iteration, )
        else:
            logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''', iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('''Self-training job completed: iteration: %d, stage: 1.''', iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, '''best-checkpoint''')
            current_output_dir = os.path.join(current_data_dir, '''stage-2''')
            # Update arguments_dict
            arguments_dict['''model_name_or_path'''] = model_path
            arguments_dict['''train_file'''] = data_files['''train''']
            arguments_dict['''output_dir'''] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, '''best-checkpoint''', MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''', model_bin_file_path, iteration, )
            else:
                logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''', iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('''Self-training job completed: iteration: %d, stage: 2.''', iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, '''best-checkpoint'''))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, '''eval_results_best-checkpoint.json''')
        test_results_file = os.path.join(current_output_dir, '''test_results_best-checkpoint.json''')
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, '''r''') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, '''infer_output_best-checkpoint.csv''')
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={'''data''': data_files['''infer''']})['''data''']
        infer_output = load_dataset('''csv''', data_files={'''data''': infer_output_file})['''data''']

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, F'''eval_results_iter-{iteration}.json'''))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, F'''test_results_iter-{iteration}.json'''))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files['''train_pseudo'''] = os.path.join(next_data_dir, F'''train_pseudo.{args.data_file_extension}''')

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info('''Best iteration: %d''', best_iteration)
        logger.info('''Best evaluation result: %s = %f''', args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, F'''eval_results_iter-{iteration}.json'''), os.path.join(output_dir, '''eval_results_best-iteration.json'''), )
    else:
        # Assume that the last iteration is the best
        logger.info('''Best iteration: %d''', args.max_selftrain_iterations - 1)
        logger.info('''Best evaluation result: %s = %f''', args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json'''), os.path.join(output_dir, '''eval_results_best-iteration.json'''), )
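# Usage sketch for the driver above (illustrative paths and model id; the
# `finetune` callable imported at the top of this file does the actual work):
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/unlabeled.csv",
#       output_dir="outputs/self-training",
#       eval_file="data/eval.csv",
#       evaluation_strategy="epoch",
#       max_selftrain_iterations=3,
#   )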
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
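# With the lazy structure above, importing the package itself is cheap: heavy
# submodules (e.g. the torch-backed modeling file) are only loaded the first
# time one of their attributes is actually accessed, e.g. (illustrative):
#
#   from transformers.models.efficientnet import EfficientNetConfig  # triggers the lazy load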
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
        tokenizer.model_max_length = 77
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components['''text_encoder'''] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['''prompt'''] = '''A photo of an astronaut'''
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components['''text_encoder'''] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''', safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type='''np''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''', subfolder='''scheduler''')
        alt_pipe = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''', scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type='''numpy''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """
    Given any two of conductivity, electron concentration and mobility (pass the
    unknown one as 0), solve sigma = q * n * mu for the missing quantity and
    return its name together with the computed value.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif conductivity < 0:
        raise ValueError('Conductivity cannot be negative')
    elif electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative')
    elif mobility < 0:
        raise ValueError('mobility cannot be negative')
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
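# Worked example (values chosen for easy arithmetic): with conductivity = 25 S/m,
# electron_conc = 1e19 /m^3 and mobility left as the unknown (0), the function
# solves mu = sigma / (q * n) = 25 / (1e19 * 1.6021e-19) ~= 15.6 m^2/(V*s):
#
#   carrier_concentration(conductivity=25, electron_conc=1e19, mobility=0)
#   # -> ("mobility", 15.604...)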
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''')
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        '''The number of five-card hands possible from a standard''',
        f"""fifty-two card deck is: {combinations(52, 5)}\n""",
    )
    print(
        '''If a class of 40 students must be arranged into groups of''',
        f"""4 for group projects, there are {combinations(40, 4)} ways""",
        '''to arrange them.\n''',
    )
    print(
        '''If 10 teams are competing in a Formula One race, there''',
        f"""are {combinations(10, 3)} ways that first, second and""",
        '''third place can be awarded.''',
    )
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCamelCase__ = "sshleifer/bart-tiny-random"
lowerCamelCase__ = "patrickvonplaten/t5-tiny-random"
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return AutoConfig.from_pretrained(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase, *_UpperCamelCase : Tuple = create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase, *_UpperCamelCase : List[Any] = create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=1 , d=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase, *_UpperCamelCase : List[Any] = create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=1 , d=_UpperCamelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
_UpperCamelCase, *_UpperCamelCase : Optional[Any] = create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
with self.assertRaises(_UpperCamelCase ):
create_student_by_copying_alternating_layers(_UpperCamelCase , tempfile.mkdtemp() , e=_UpperCamelCase , d=_UpperCamelCase )
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : bool
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 42
SCREAMING_SNAKE_CASE : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : Optional[bool] = None
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "titi"
SCREAMING_SNAKE_CASE : Any = "toto"
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = "titi"
SCREAMING_SNAKE_CASE : Optional[Any] = "toto"
SCREAMING_SNAKE_CASE : Any = 42
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : BasicEnum = "toto"
def snake_case__( self : Tuple ) ->List[str]:
snake_case_ = BasicEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto"
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] )
SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] )
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = field()
SCREAMING_SNAKE_CASE : str = field()
SCREAMING_SNAKE_CASE : BasicEnum = field()
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : "BasicEnum" = field()
SCREAMING_SNAKE_CASE : "Optional[bool]" = None
SCREAMING_SNAKE_CASE : "str" = field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : bool | None = None
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int | None = None
SCREAMING_SNAKE_CASE : float | None = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : str | None = None
SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] )
SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((snake_case_), ) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__( self : Tuple ) ->Union[str, Any]:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
snake_case_ = parser.parse_dict(_UpperCamelCase )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : int ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
def snake_case__( self : str ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_json''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Any ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase ) | 39 | 0 |
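# A hedged usage sketch of the pattern the tests above cover: HfArgumentParser
# turns a dataclass's fields into CLI flags. Runnable with `transformers` installed.
from dataclasses import dataclass
from transformers import HfArgumentParser

@dataclass
class DemoArgs:
    foo: int
    bar: float = 3.14
    baz: str = "toto"

demo_parser = HfArgumentParser(DemoArgs)
(demo,) = demo_parser.parse_args_into_dataclasses(["--foo", "12", "--baz", "quux"])
assert (demo.foo, demo.bar, demo.baz) == (12, 3.14, "quux")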
'''simple docstring'''
from timeit import timeit
__lowerCamelCase : Tuple = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =0
_UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) // 2
_UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE__ ) )
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) <= 2:
return True
if s[0] == s[len(SCREAMING_SNAKE_CASE__ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return s == s[::-1]
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =f'''all({name}(key) is value for key, value in test_data.items())'''
_UpperCamelCase =f'''from __main__ import test_data, {name}'''
_UpperCamelCase =50_0000
_UpperCamelCase =timeit(stmt=SCREAMING_SNAKE_CASE__ , setup=SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ )
print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
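# A hedged standalone sketch of the same measurement, with the timeit call
# inlined so it runs without the helper above; timings will vary by machine.
from timeit import timeit

setup = "s = 'amanaplanacanalpanama'"
stmt = "s == s[::-1]"
print(f"slice check: {timeit(stmt=stmt, setup=setup, number=500_000):.5f}s for 500,000 runs")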
| 404 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Dict , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 39 | 0 |
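# A hedged standalone sketch of the deprecation-shim pattern used above: the old
# class name keeps working but emits a FutureWarning and delegates to the new one.
import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldFeatureExtractor(size=128)
assert caught and issubclass(caught[0].category, FutureWarning)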
from __future__ import annotations
import math
def __UpperCamelCase ( _A ):
if num <= 0:
lowerCAmelCase_ = f"{num}: Invalid input, please enter a positive integer."
raise ValueError(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = [True] * (num + 1)
lowerCAmelCase_ = []
lowerCAmelCase_ = 2
lowerCAmelCase_ = int(math.sqrt(SCREAMING_SNAKE_CASE__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE__ )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE__ ):
if sieve[i] is True:
lowerCAmelCase_ = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
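# A hedged cross-check sketch: a compact sieve validated against trial division.
# Reimplemented locally because the function above carries an obfuscated name.
def naive_primes(limit: int) -> list:
    return [p for p in range(2, limit + 1) if all(p % d for d in range(2, int(p**0.5) + 1))]

def mini_sieve(limit: int) -> list:
    is_prime = [True] * (limit + 1)
    is_prime[0:2] = [False, False]
    for p in range(2, int(limit**0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

assert mini_sieve(100) == naive_primes(100)
assert mini_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]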
| 431 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "rwkv"
SCREAMING_SNAKE_CASE : Any = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , _UpperCamelCase : Any=5_0_2_7_7 , _UpperCamelCase : Optional[int]=1_0_2_4 , _UpperCamelCase : Optional[int]=4_0_9_6 , _UpperCamelCase : str=3_2 , _UpperCamelCase : Tuple=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=1e-5 , _UpperCamelCase : Any=0 , _UpperCamelCase : Optional[Any]=0 , _UpperCamelCase : int=6 , _UpperCamelCase : Dict=False , _UpperCamelCase : Optional[int]=True , **_UpperCamelCase : int , ) ->List[str]:
snake_case_ = vocab_size
snake_case_ = context_length
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
snake_case_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
snake_case_ = layer_norm_epsilon
snake_case_ = rescale_every
snake_case_ = use_cache
snake_case_ = bos_token_id
snake_case_ = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 39 | 0 |
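# A hedged usage sketch: the `attribute_map` above aliases `max_position_embeddings`
# to `context_length`, so both names read and write the same value. Runnable with a
# transformers release that ships RwkvConfig (v4.29 or later).
from transformers import RwkvConfig

cfg = RwkvConfig(context_length=2048)
assert cfg.max_position_embeddings == 2048  # resolved through attribute_map
cfg.max_position_embeddings = 4096
assert cfg.context_length == 4096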
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer
# needed, at the cost of slower execution, as explained here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ : List[str] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Any=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
UpperCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , a__ : str , a__ : Tuple=13 , a__ : Union[str, Any]=7 , a__ : Optional[int]=True , a__ : Tuple=False , a__ : str=99 , a__ : List[str]=16 , a__ : Any=2 , a__ : List[str]=4 , a__ : Union[str, Any]=4 , a__ : Tuple="gelu" , a__ : List[str]=0.1 , a__ : int=0.1 , a__ : Union[str, Any]=32 , a__ : int=2 , a__ : Optional[int]=1 , a__ : Any=0 , a__ : Union[str, Any]=0.02 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = initializer_range
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase = shift_tokens_right(_UpperCamelCase , 1 , 2 )
UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCamelCase , )
UpperCAmelCase = prepare_blenderbot_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, inputs_dict
def __snake_case ( self : List[Any] ):
UpperCAmelCase, UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def __snake_case ( self : List[Any] , a__ : Optional[int] , a__ : List[Any] , a__ : Optional[Any] ):
UpperCAmelCase = 20
UpperCAmelCase = model_class_name(_UpperCamelCase )
UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase, UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , )
UpperCAmelCase = model.decode(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
def __snake_case ( self : Any , a__ : str , a__ : Union[str, Any] , a__ : Optional[Any] ):
UpperCAmelCase = 20
UpperCAmelCase = model_class_name(_UpperCamelCase )
UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase, UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
UpperCAmelCase = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase )
UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =99
def __snake_case ( self : Tuple ):
UpperCAmelCase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase = input_ids.shape[0]
UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __snake_case ( self : Any ):
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = self._get_config_and_data()
UpperCAmelCase = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
UpperCAmelCase = lm_model(input_ids=_UpperCamelCase )
UpperCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCamelCase )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
UpperCAmelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase = lm_model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase )
UpperCAmelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCamelCase )
def __snake_case ( self : Tuple ):
UpperCAmelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
UpperCAmelCase = shift_tokens_right(_UpperCamelCase , 1 , 2 )
UpperCAmelCase = np.equal(_UpperCamelCase , 1 ).astype(np.floataa ).sum()
UpperCAmelCase = np.equal(_UpperCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase__ ( __A , unittest.TestCase , __A ):
'''simple docstring'''
_lowerCamelCase =True
_lowerCamelCase =(
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_lowerCamelCase =(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def __snake_case ( self : List[str] ):
UpperCAmelCase = FlaxBlenderbotModelTester(self )
def __snake_case ( self : int ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __snake_case ( self : List[str] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __snake_case ( self : List[Any] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase = model_class(_UpperCamelCase )
@jax.jit
def encode_jitted(a__ : str , a__ : Union[str, Any]=None , **a__ : List[str] ):
return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = encode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = encode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __snake_case ( self : List[str] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = model_class(_UpperCamelCase )
UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(a__ : List[str] , a__ : Any , a__ : List[Any] ):
return model.decode(
decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = decode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = decode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __snake_case ( self : Dict ):
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# Blenderbot expects an eos token at the end of input_ids
UpperCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase = model(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_UpperCamelCase )
UpperCAmelCase = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase = ['''Sam''']
UpperCAmelCase = tokenizer(_UpperCamelCase , return_tensors='''jax''' )
UpperCAmelCase = model.generate(**_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase = tokenizer.batch_decode(_UpperCamelCase , **_UpperCamelCase )
assert generated_txt[0].strip() == tgt_text
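# A hedged standalone sketch of `shift_tokens_right` as used above: decoder inputs
# are the labels shifted one slot to the right, with the first position set to the
# decoder start token; -100 label masking, if present, is mapped back to pad.
import numpy as np

def shift_right(input_ids: np.ndarray, pad_token_id: int, start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

assert shift_right(np.array([[71, 82, 2]]), pad_token_id=1, start_token_id=2).tolist() == [[2, 71, 82]]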
| 51 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
class snake_case_ :
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : Optional[str] = None ) ->Tuple:
snake_case_ = (
os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case_ = Extractor
def snake_case__( self : Any , _UpperCamelCase : str ) ->str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
snake_case_ = os.path.abspath(_UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) )
def snake_case__( self : int , _UpperCamelCase : str , _UpperCamelCase : bool ) ->bool:
return force_extract or (
not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ))
)
def snake_case__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : bool = False ) ->str:
snake_case_ = self.extractor.infer_extractor_format(_UpperCamelCase )
if not extractor_format:
return input_path
snake_case_ = self._get_output_path(_UpperCamelCase )
if self._do_extract(_UpperCamelCase , _UpperCamelCase ):
self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return output_path
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
@abstractmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : str ) ->bool:
...
@staticmethod
@abstractmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
...
class snake_case_ ( __A , __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[bytes] = []
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->List[Any]:
with open(_UpperCamelCase , '''rb''' ) as f:
return f.read(_UpperCamelCase )
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if not magic_number:
snake_case_ = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
snake_case_ = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : Any ) ->bool:
return tarfile.is_tarfile(_UpperCamelCase )
@staticmethod
def snake_case__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) ->List[str]:
def resolved(_UpperCamelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_UpperCamelCase ) )
def badpath(_UpperCamelCase : str , _UpperCamelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_UpperCamelCase , _UpperCamelCase ) ).startswith(_UpperCamelCase )
def badlink(_UpperCamelCase : Tuple , _UpperCamelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case_ = resolved(os.path.join(_UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_UpperCamelCase )
snake_case_ = resolved(_UpperCamelCase )
for finfo in members:
if badpath(finfo.name , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) )
tar_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [b"\x1F\x8B"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with gzip.open(_UpperCamelCase , '''rb''' ) as gzip_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def snake_case__( cls : List[str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCamelCase , '''rb''' ) as fp:
snake_case_ = _EndRecData(_UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case_ = fp.read(_UpperCamelCase ) # CD is where we expect it to be
if len(_UpperCamelCase ) == sizeCentralDir:
snake_case_ = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with zipfile.ZipFile(_UpperCamelCase , '''r''' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with lzma.open(_UpperCamelCase ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = rarfile.RarFile(_UpperCamelCase )
rf.extractall(_UpperCamelCase )
rf.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
snake_case_ = zstd.ZstdDecompressor()
with open(_UpperCamelCase , '''rb''' ) as ifh, open(_UpperCamelCase , '''wb''' ) as ofh:
dctx.copy_stream(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"\x42\x5A\x68"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with bz2.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import py7zr
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with py7zr.SevenZipFile(_UpperCamelCase , '''r''' ) as archive:
archive.extractall(_UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x04\x22\x4D\x18"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lz4.frame
with lz4.frame.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case__( cls : List[Any] ) ->List[str]:
return max(
len(_UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCamelCase , _UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->Tuple:
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase )
except OSError:
return b""
@classmethod
def snake_case__( cls : Optional[Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bool = False ) ->bool:
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = cls.infer_extractor_format(_UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case__( cls : int , _UpperCamelCase : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
snake_case_ = cls._get_magic_number_max_length()
snake_case_ = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return extractor_format
@classmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ) ->None:
os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase )
# Prevent parallel extractions
snake_case_ = str(Path(_UpperCamelCase ).with_suffix('''.lock''' ) )
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = extractor if extractor != '''deprecated''' else extractor_format
else:
snake_case_ = cls.extractors[extractor_format]
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCamelCase ):
return extractor.extract(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
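# A hedged standalone sketch of the magic-number dispatch implemented above: read
# the first bytes of a file and match them against known format signatures.
import gzip
import tempfile
from typing import Optional

MAGIC = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\xfd\x37\x7a\x58\x5a\x00": "xz"}

def sniff_format(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in MAGIC))
    return next((name for magic, name in MAGIC.items() if head.startswith(magic)), None)

with tempfile.NamedTemporaryFile(suffix=".gz", delete=False) as tmp:
    tmp.write(gzip.compress(b"hello"))
assert sniff_format(tmp.name) == "gzip"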
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
SCREAMING_SNAKE_CASE__ : str = '__DUMMY_TRANSFORMERS_USER__'
SCREAMING_SNAKE_CASE__ : Optional[int] = 'Dummy User'
SCREAMING_SNAKE_CASE__ : int = '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'
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://hub-ci.huggingface.co'
SCREAMING_SNAKE_CASE__ : str = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
SCREAMING_SNAKE_CASE__ : Optional[int] = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
SCREAMING_SNAKE_CASE__ : List[str] = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def a__ ( snake_case__ : Optional[int] ):
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def a__ ( snake_case__ : Optional[Any] ):
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , SCREAMING_SNAKE_CASE__ )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def a__ ( snake_case__ : str ):
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def a__ ( snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def a__ ( ):
return HfApi(endpoint=SCREAMING_SNAKE_CASE__ )
@pytest.fixture(scope="""session""" )
def a__ ( snake_case__ : Dict ):
_UpperCAmelCase : List[str] = HfFolder.get_token()
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def a__ ( snake_case__ : List[str] ):
def _cleanup_repo(snake_case__ : Any ):
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def a__ ( snake_case__ : int ):
@contextmanager
def _temporary_repo(snake_case__ : str ):
try:
yield repo_id
finally:
cleanup_repo(SCREAMING_SNAKE_CASE__ )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def a__ ( snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ):
_UpperCAmelCase : List[str] = f'''repo_txt_data-{int(time.time() * 1_0e3 )}'''
_UpperCAmelCase : List[Any] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo="""data/text_data.txt""" , repo_id=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[str] ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def a__ ( snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] ):
_UpperCAmelCase : Optional[int] = f'''repo_zipped_txt_data-{int(time.time() * 1_0e3 )}'''
_UpperCAmelCase : Union[str, Any] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo="""data.zip""" , repo_id=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Optional[Any] ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def a__ ( snake_case__ : str , snake_case__ : str , snake_case__ : str ):
_UpperCAmelCase : Union[str, Any] = f'''repo_zipped_img_data-{int(time.time() * 1_0e3 )}'''
_UpperCAmelCase : Dict = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo="""data.zip""" , repo_id=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[Any] ):
return hf_private_dataset_repo_zipped_img_data_
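# A hedged sketch of how a test module would consume the fixtures above: pytest
# injects them by parameter name. Illustrative only; it assumes this conftest is
# on the collection path and hub-ci credentials are valid.
def test_private_txt_repo_is_readable(hf_api, hf_token, hf_private_dataset_repo_txt_data):
    info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    assert info.id == hf_private_dataset_repo_txt_data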
| 643 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if any(not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(SCREAMING_SNAKE_CASE__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 39 | 0 |
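# A hedged sanity-check sketch: each pass above transfers the full difference
# between neighbours, which amounts to a swap, so len(seq) passes must agree with
# the built-in sort. Reimplemented locally so the snippet runs standalone.
import random

def gravity_sort(seq: list) -> list:
    seq = list(seq)
    for _ in range(len(seq)):
        for i in range(len(seq) - 1):
            if seq[i] > seq[i + 1]:
                diff = seq[i] - seq[i + 1]
                seq[i] -= diff
                seq[i + 1] += diff
    return seq

data = [random.randrange(100) for _ in range(20)]
assert gravity_sort(data) == sorted(data)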
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __UpperCamelCase (lowerCAmelCase : str = 3 ) -> List[Any]:
if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(SCREAMING_SNAKE_CASE__ ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate(>10).' )
A = QuantumRegister(SCREAMING_SNAKE_CASE__, 'qr' )
A = ClassicalRegister(SCREAMING_SNAKE_CASE__, 'cr' )
A = QuantumCircuit(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
A = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE__ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(SCREAMING_SNAKE_CASE__ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j), SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(SCREAMING_SNAKE_CASE__, number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# simulate with 10000 shots
A = Aer.get_backend('qasm_simulator' )
A = execute(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, shots=10_000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
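# A hedged classical counterpart of the circuit above: the n-qubit QFT is the
# unitary DFT matrix F[j][k] = exp(2*pi*i*j*k / 2**n) / sqrt(2**n). Applied to
# |000>, it yields a uniform superposition, which is why the measured counts are
# roughly flat across all 2**n outcomes.
import numpy as np

n_qubits = 3
dim = 2 ** n_qubits
j, k = np.meshgrid(np.arange(dim), np.arange(dim), indexing="ij")
qft_matrix = np.exp(2j * np.pi * j * k / dim) / np.sqrt(dim)

state = np.zeros(dim)
state[0] = 1.0  # |000>
amplitudes = qft_matrix @ state
assert np.allclose(np.abs(amplitudes) ** 2, 1 / dim)  # uniform probabilities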
| 699 |
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ = re.sub('''<n>''' , '''''' , SCREAMING_SNAKE_CASE__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE__ ) ) | 39 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _A ( __A ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : int = None
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCamelCase ,"""feature_size""" ) )
self.assertTrue(hasattr(_UpperCamelCase ,"""sampling_rate""" ) )
self.assertTrue(hasattr(_UpperCamelCase ,"""padding_value""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = self.feat_extract_tester.prepare_inputs_for_common()
snake_case : str = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : Optional[Any] = feat_extract.model_input_names[0]
snake_case : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCamelCase ) == len(_UpperCamelCase ) for x, y in zip(_UpperCamelCase ,processed_features[input_name] ) ) )
snake_case : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCamelCase )
snake_case : List[Any] = BatchFeature({input_name: speech_inputs} ,tensor_type="""np""" )
snake_case : List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCamelCase )
snake_case : str = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : Any = feat_extract.model_input_names[0]
snake_case : Optional[Any] = BatchFeature({input_name: speech_inputs} ,tensor_type="""pt""" )
snake_case : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCamelCase )
snake_case : str = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : Union[str, Any] = feat_extract.model_input_names[0]
snake_case : List[str] = BatchFeature({input_name: speech_inputs} ,tensor_type="""tf""" )
snake_case : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ):
snake_case : List[Any] = len(input[0] )
for input_slice in input[1:]:
if len(_UpperCamelCase ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
if len(_UpperCamelCase ) != len(_UpperCamelCase ):
return False
for input_slice_a, input_slice_a in zip(_UpperCamelCase ,_UpperCamelCase ):
if not np.allclose(np.asarray(_UpperCamelCase ) ,np.asarray(_UpperCamelCase ) ,atol=1E-3 ):
return False
return True
snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCamelCase )
snake_case : Optional[Any] = feat_extract.model_input_names[0]
snake_case : List[Any] = BatchFeature({input_name: speech_inputs} )
snake_case : Dict = self.feat_extract_tester.seq_length_diff
snake_case : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff
snake_case : Optional[int] = self.feat_extract_tester.min_seq_length
snake_case : str = self.feat_extract_tester.batch_size
snake_case : Dict = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
snake_case : int = feat_extract.pad(_UpperCamelCase ,padding=_UpperCamelCase )
snake_case : List[Any] = input_a[input_name]
snake_case : Tuple = feat_extract.pad(_UpperCamelCase ,padding="""longest""" )
snake_case : Optional[int] = input_a[input_name]
snake_case : List[str] = feat_extract.pad(_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[-1] ) )
snake_case : Optional[int] = input_a[input_name]
snake_case : Any = feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,return_tensors="""np""" )
snake_case : Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_UpperCamelCase ):
feat_extract.pad(_UpperCamelCase ,padding="""max_length""" )[input_name]
snake_case : Dict = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=_UpperCamelCase ,return_tensors="""np""" )
snake_case : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertTrue(_inputs_are_equal(_UpperCamelCase ,_UpperCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
snake_case : str = feat_extract.pad(_UpperCamelCase ,pad_to_multiple_of=10 )
snake_case : Dict = input_a[input_name]
snake_case : Any = feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,pad_to_multiple_of=10 )
snake_case : str = input_a[input_name]
snake_case : Dict = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,pad_to_multiple_of=10 ,max_length=_UpperCamelCase )
snake_case : Any = input_a[input_name]
snake_case : str = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,pad_to_multiple_of=10 ,max_length=_UpperCamelCase ,return_tensors="""np""" ,)
snake_case : str = input_a[input_name]
self.assertTrue(all(len(_UpperCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_UpperCamelCase ,_UpperCamelCase ) )
snake_case : Dict = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_UpperCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
snake_case : Tuple = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ):
snake_case : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_UpperCamelCase ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
if len(_UpperCamelCase ) != len(_UpperCamelCase ):
return False
for input_slice_a, input_slice_a in zip(_UpperCamelCase ,_UpperCamelCase ):
if not np.allclose(np.asarray(_UpperCamelCase ) ,np.asarray(_UpperCamelCase ) ,atol=1E-3 ):
return False
return True
snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCamelCase )
snake_case : Any = feat_extract.model_input_names[0]
snake_case : Any = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
snake_case : List[str] = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[0] ) ,truncation=_UpperCamelCase )
snake_case : Dict = input_a[input_name]
snake_case : Union[str, Any] = feat_extract.pad(_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[0] ) )
snake_case : List[str] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertFalse(_inputs_have_equal_length(_UpperCamelCase ) )
# truncate to smallest with np
snake_case : str = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[0] ) ,return_tensors="""np""" ,truncation=_UpperCamelCase ,)
snake_case : str = input_a[input_name]
snake_case : Dict = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[0] ) ,return_tensors="""np""" )
snake_case : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCamelCase ) )
# truncate to middle
snake_case : str = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[1] ) ,truncation=_UpperCamelCase ,return_tensors="""np""" ,)
snake_case : Optional[Any] = input_a[input_name]
snake_case : str = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[1] ) ,truncation=_UpperCamelCase )
snake_case : Optional[Any] = input_a[input_name]
snake_case : List[str] = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[1] ) ,return_tensors="""np""" )
snake_case : int = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertTrue(_inputs_are_equal(_UpperCamelCase ,_UpperCamelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCamelCase ):
feat_extract.pad(_UpperCamelCase ,truncation=_UpperCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCamelCase ):
feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,truncation=_UpperCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCamelCase ):
feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,truncation=_UpperCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_UpperCamelCase ):
feat_extract.pad(_UpperCamelCase ,padding="""max_length""" ,truncation=_UpperCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
snake_case : List[Any] = 12
snake_case : List[Any] = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_UpperCamelCase ,truncation=_UpperCamelCase ,)
snake_case : Optional[int] = input_a[input_name]
snake_case : List[Any] = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_UpperCamelCase ,)
snake_case : Tuple = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
snake_case : Optional[int] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
snake_case : Dict = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_UpperCamelCase ) )
self.assertFalse(_inputs_have_equal_length(_UpperCamelCase ) )
def snake_case_ ( self ):
'''simple docstring'''
self._check_padding(numpify=_UpperCamelCase )
def snake_case_ ( self ):
'''simple docstring'''
self._check_padding(numpify=_UpperCamelCase )
def snake_case_ ( self ):
'''simple docstring'''
self._check_truncation(numpify=_UpperCamelCase )
def snake_case_ ( self ):
'''simple docstring'''
self._check_truncation(numpify=_UpperCamelCase )
@require_torch
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
snake_case : Union[str, Any] = feat_extract.model_input_names[0]
snake_case : Dict = BatchFeature({input_name: speech_inputs} )
snake_case : str = feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,return_tensors="""np""" )[input_name]
snake_case : Union[str, Any] = feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
snake_case : List[Any] = feat_extract.model_input_names[0]
snake_case : Optional[int] = BatchFeature({input_name: speech_inputs} )
snake_case : Optional[Any] = feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,return_tensors="""np""" )[input_name]
snake_case : Optional[int] = feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Any = self.feat_extract_dict
snake_case : Tuple = True
snake_case : Optional[int] = self.feature_extraction_class(**_UpperCamelCase )
snake_case : str = self.feat_extract_tester.prepare_inputs_for_common()
snake_case : Optional[int] = [len(_UpperCamelCase ) for x in speech_inputs]
snake_case : Optional[int] = feat_extract.model_input_names[0]
snake_case : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
snake_case : Optional[int] = feat_extract.pad(_UpperCamelCase ,padding="""longest""" ,return_tensors="""np""" )
self.assertIn("""attention_mask""" ,_UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,_UpperCamelCase )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = self.feat_extract_dict
snake_case : Optional[int] = True
snake_case : Union[str, Any] = self.feature_extraction_class(**_UpperCamelCase )
snake_case : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
snake_case : Any = [len(_UpperCamelCase ) for x in speech_inputs]
snake_case : Union[str, Any] = feat_extract.model_input_names[0]
snake_case : Optional[Any] = BatchFeature({input_name: speech_inputs} )
snake_case : Optional[int] = min(_UpperCamelCase )
snake_case : Optional[Any] = feat_extract.pad(
_UpperCamelCase ,padding="""max_length""" ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,return_tensors="""np""" )
self.assertIn("""attention_mask""" ,_UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
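        # The attention_mask checks above rely on the standard convention that
        # real positions are 1 and padded positions are 0, so summing the mask
        # along the last axis recovers each example's true (possibly truncated)
        # length.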
| 36 |
def binomial_coefficient(n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5)) | 39 | 0 |
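# Quick sanity check against the closed form C(n, r) = n! / (r! * (n - r)!);
# math.comb assumes Python 3.8+.
import math
assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252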
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __SCREAMING_SNAKE_CASE ( __A , __A ):
lowerCamelCase_ = "dinat"
lowerCamelCase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[Any] , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : int=[3, 4, 6, 5] , UpperCAmelCase__ : str=[2, 4, 8, 16] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : Optional[int]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase__ : List[str]=3.0 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : List[Any]=1E-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
super().__init__(**_UpperCamelCase )
lowercase : Any =patch_size
lowercase : Union[str, Any] =num_channels
lowercase : List[str] =embed_dim
lowercase : List[Any] =depths
lowercase : Any =len(_UpperCamelCase )
lowercase : str =num_heads
lowercase : Tuple =kernel_size
lowercase : Optional[int] =dilations
lowercase : Optional[int] =mlp_ratio
lowercase : Union[str, Any] =qkv_bias
lowercase : int =hidden_dropout_prob
lowercase : Union[str, Any] =attention_probs_dropout_prob
lowercase : int =drop_path_rate
lowercase : Optional[int] =hidden_act
lowercase : Union[str, Any] =layer_norm_eps
lowercase : Any =initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase : str =int(embed_dim * 2 ** (len(_UpperCamelCase ) - 1) )
lowercase : Any =layer_scale_init_value
lowercase : Optional[int] =['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(_UpperCamelCase ) + 1 )]
lowercase , lowercase : Any =get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names )
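        # With the defaults above (embed_dim=64 and four stages), the derived
        # hidden_size is int(64 * 2 ** (4 - 1)) = 512, i.e. the channel width
        # produced by the last stage.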
| 92 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 65_536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 65_536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 131_072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
}
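# Each entry pairs a released checkpoint URL with the sample rate (Hz) and
# sample size (audio frames) it was trained on; download() below fetches a
# checkpoint by its key in this map.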
def alpha_sigma_to_t(alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule(t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
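# alpha_sigma_to_t inverts the (alpha, sigma) parameterization of the noise
# schedule: t = atan2(sigma, alpha) / (pi / 2), so t = 0 means pure signal
# (alpha=1, sigma=0) and t = 1 means pure noise (alpha=0, sigma=1).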
class snake_case_ ( __A ):
'''simple docstring'''
pass
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int ) ->Optional[int]:
super().__init__()
snake_case_ = DiffusionAttnUnetaD(_UpperCamelCase , n_attn_layers=4 )
snake_case_ = deepcopy(self.diffusion )
snake_case_ = torch.quasirandom.SobolEngine(1 , scramble=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
for key, value in ATTN_MAP.items():
if name.startswith(SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif name.startswith(SCREAMING_SNAKE_CASE__ ):
return [name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 ):
snake_case_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
snake_case_ = 0
if string.startswith('''net.3.''' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('''net.''' ):
snake_case_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('''main.''' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = '''mid_block'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - 1}''' if int(SCREAMING_SNAKE_CASE__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(SCREAMING_SNAKE_CASE__ )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(SCREAMING_SNAKE_CASE__ )
snake_case_ = new_string_left
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
snake_case_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
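# Example of the early-exit branch at the top of the renamer: a checkpoint key
# starting with "timestep_embed" is mapped directly, e.g.
# "timestep_embed.weight" -> "time_proj.weight", before any depth bookkeeping.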
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers, don't have trainable weights
continue
snake_case_ = rename(SCREAMING_SNAKE_CASE__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = transform_conv_attns(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = v
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
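# The qkv branch above slices a fused (3*d, in, 1) conv kernel into query/key/
# value weights of shape (d, in) along dim 0, dropping the trailing
# kernel-size-1 axis; 1-D bias vectors are split the same way without it.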
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(SCREAMING_SNAKE_CASE__ )
snake_case_ = MODELS_MAP[model_name]['''sample_rate''']
snake_case_ = MODELS_MAP[model_name]['''sample_size''']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
snake_case_ = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )['''state_dict'''] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(SCREAMING_SNAKE_CASE__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
snake_case_ = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
snake_case_ = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
snake_case_ = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , SCREAMING_SNAKE_CASE__ )
print('''Diff max''' , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase_ = parser.parse_args()
main(args) | 39 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__UpperCamelCase = logging.getLogger(__name__)
def _a ( ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
__snake_case : Optional[int] = parser.parse_args()
return args
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
def fn(_lowerCamelCase ):
return tokenizer(examples["""text"""] )
return fn
def _a ( _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
__snake_case : Optional[int] = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
__snake_case : List[str] = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ )
__snake_case : Optional[int] = tf.train.Example(features=SCREAMING_SNAKE_CASE__ )
__snake_case : Optional[Any] = example.SerializeToString()
records.append(SCREAMING_SNAKE_CASE__ )
return records
def _a ( _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__snake_case : Optional[int] = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit )
__snake_case : Union[str, Any] = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
__snake_case : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__snake_case : List[str] = os.path.join(args.output_dir , args.split )
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
else:
__snake_case : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__snake_case : Optional[int] = tokenize_function(SCREAMING_SNAKE_CASE__ )
__snake_case : List[str] = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_lowerCamelCase ):
# Concatenate all texts.
__snake_case : Dict = {k: sum(examples[k] , [] ) for k in examples.keys()}
__snake_case : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__snake_case : int = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__snake_case : Any = {
k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
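    # Illustrative numbers for the chunking above (hypothetical): with
    # max_length=512 and a concatenated stream of 1_300 tokens, total_length is
    # truncated to 1_024 and split into two 512-token samples; the trailing 276
    # tokens are dropped.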
__snake_case : Tuple = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 )
__snake_case : str = 0
__snake_case : Tuple = 0
for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ):
__snake_case : List[str] = grouped_dataset[shard : shard + args.shard_size]
__snake_case : Optional[int] = len(dataset_snapshot["""input_ids"""] )
__snake_case : Any = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
__snake_case : Dict = get_serialized_examples(SCREAMING_SNAKE_CASE__ )
with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file:
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__snake_case : str = serialized_examples[i]
out_file.write(SCREAMING_SNAKE_CASE__ )
print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__UpperCamelCase = parse_args()
main(args)
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
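# Note: replacing sys.modules[__name__] with a _LazyModule defers importing
# the torch-backed classes until an attribute is first accessed, so a plain
# `import` of this package stays cheap and still succeeds when torch is absent.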
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__SCREAMING_SNAKE_CASE = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
__SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : Tuple=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] =create_model(
'HTSAT-tiny' ,'roberta' ,SCREAMING_SNAKE_CASE__ ,precision='fp32' ,device='cuda:0' if torch.cuda.is_available() else 'cpu' ,enable_fusion=SCREAMING_SNAKE_CASE__ ,fusion_type='aff_2d' if enable_fusion else None ,)
return model, model_cfg
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any ={}
SCREAMING_SNAKE_CASE_ : Tuple =r'.*sequential.(\d+).*'
SCREAMING_SNAKE_CASE_ : List[Any] =r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
SCREAMING_SNAKE_CASE_ : Dict =key.replace(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if re.match(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
# replace sequential layers with list
SCREAMING_SNAKE_CASE_ : Any =re.match(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).group(1 )
SCREAMING_SNAKE_CASE_ : str =key.replace(F"""sequential.{sequential_layer}.""" ,F"""layers.{int(SCREAMING_SNAKE_CASE__ )//3}.linear.""" )
elif re.match(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE_ : List[str] =int(re.match(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
SCREAMING_SNAKE_CASE_ : Optional[Any] =1 if projecton_layer == 0 else 2
SCREAMING_SNAKE_CASE_ : str =key.replace(F"""_projection.{projecton_layer}.""" ,F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
SCREAMING_SNAKE_CASE_ : Union[str, Any] =value
SCREAMING_SNAKE_CASE_ : str =mixed_qkv.size(0 ) // 3
SCREAMING_SNAKE_CASE_ : Union[str, Any] =mixed_qkv[:qkv_dim]
SCREAMING_SNAKE_CASE_ : Optional[Any] =mixed_qkv[qkv_dim : qkv_dim * 2]
SCREAMING_SNAKE_CASE_ : List[str] =mixed_qkv[qkv_dim * 2 :]
SCREAMING_SNAKE_CASE_ : List[Any] =query_layer
SCREAMING_SNAKE_CASE_ : Optional[Any] =key_layer
SCREAMING_SNAKE_CASE_ : Dict =value_layer
else:
SCREAMING_SNAKE_CASE_ : List[Any] =value
return model_state_dict
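# The two regexes above rewrite checkpoint keys: "sequential.N." becomes
# "layers.{N // 3}.linear." (only every third module in the original
# nn.Sequential is a Linear layer, hence the integer division), and
# "_projection.N." becomes "_projection.linear1." or "_projection.linear2.".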
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Optional[int]=False ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any =init_clap(SCREAMING_SNAKE_CASE__ ,enable_fusion=SCREAMING_SNAKE_CASE__ )
clap_model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] =clap_model.state_dict()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =rename_state_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : int =ClapConfig()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =enable_fusion
SCREAMING_SNAKE_CASE_ : str =ClapModel(SCREAMING_SNAKE_CASE__ )
# ignore the spectrogram embedding layer
model.load_state_dict(SCREAMING_SNAKE_CASE__ ,strict=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
transformers_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 220 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
snake_case_ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(_UpperCamelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('''inf''' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(_UpperCamelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-12 )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
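        # Context for the expectations above: top-k keeps only the k largest
        # logits, and top-p keeps the smallest set of highest-probability
        # tokens whose cumulative softmax mass reaches p; everything else is
        # set to -inf, and min_tokens_to_keep=4 guarantees at least 4
        # surviving logits per row.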
@require_tf
class snake_case_ ( unittest.TestCase , __A ):
'''simple docstring'''
if is_tf_available():
SCREAMING_SNAKE_CASE : Optional[int] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def snake_case__( self : List[Any] ) ->Optional[int]:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 2
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->List[Any]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) ->List[Any]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_0_2, 1_0_3]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for batch_size in range(1 , len(_UpperCamelCase ) + 1 ):
snake_case_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
def snake_case__( self : List[str] ) ->int:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 1
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Any ) ->List[str]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) ->Optional[int]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2], [1_0_2, 1_0_3]]
snake_case_ = [[1], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for input_row in range(len(_UpperCamelCase ) ):
snake_case_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
@require_tensorflow_text
def snake_case__( self : Optional[Any] ) ->List[Any]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_UpperCamelCase )
class snake_case_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ) ->List[Any]:
super().__init__()
snake_case_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_UpperCamelCase , '''spiece.model''' ) , '''rb''' ).read() )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : str ) ->List[Any]:
snake_case_ = self.tokenizer.tokenize(_UpperCamelCase )
snake_case_, snake_case_ = text.pad_model_inputs(
_UpperCamelCase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
snake_case_ = self.model.generate(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
return self.tokenizer.detokenize(_UpperCamelCase )
snake_case_ = CompleteSentenceTransformer()
snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
snake_case_ = complete_model(_UpperCamelCase )
snake_case_ = tf.keras.Model(_UpperCamelCase , _UpperCamelCase )
keras_model.save(_UpperCamelCase )
def snake_case__( self : Any ) ->List[Any]:
# Has PT equivalent: this test relies on random sampling
snake_case_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
snake_case_ = 1_4
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = '''Hello, my dog is cute and'''
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''tf''' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def snake_case__( self : str ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = '''Hugging Face is a technology company based in New York and Paris.'''
snake_case_ = bart_tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
snake_case_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Tuple=None , **_UpperCamelCase : Optional[int] ) ->List[str]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_UpperCamelCase , _UpperCamelCase ) )
class snake_case_ ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , **_UpperCamelCase : Tuple ) ->Optional[Any]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
with self.assertRaises(_UpperCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_UpperCamelCase , foo='''bar''' ) | 39 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A_:
"""simple docstring"""
def __init__( self , A , A=3 , A=32 , A=3 , A=10 , A=[8, 16, 32, 64] , A=[1, 1, 2, 1] , A=True , A=True , A="relu" , A=3 , A=None , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=1 , ):
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : List[Any] = embeddings_size
_lowerCamelCase : Optional[int] = hidden_sizes
_lowerCamelCase : List[str] = depths
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : int = num_labels
_lowerCamelCase : Dict = scope
_lowerCamelCase : List[Any] = len(_UpperCamelCase )
_lowerCamelCase : Optional[int] = out_features
_lowerCamelCase : Dict = out_indices
_lowerCamelCase : Tuple = num_groups
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Optional[Any] = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _lowerCAmelCase ( self , A , A , A ):
_lowerCamelCase : str = BitModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCamelCase : Tuple = model(_UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCAmelCase ( self , A , A , A ):
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Optional[int] = BitForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCamelCase : Dict = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self , A , A , A ):
_lowerCamelCase : Optional[Any] = BitBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = BitBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = config_and_inputs
_lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_(__A , __A , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config(self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        return
@unittest.skip(reason='Bit does not output attentions' )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def _lowerCAmelCase ( self ):
pass
    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def _lowerCAmelCase ( self ):
pass
    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self ):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
@slow
    def test_inference_image_classification_head(self ):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self ):
        self.model_tester = BitModelTester(self )
| 437 |
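For context, a hedged sketch of what the backbone checks above exercise at inference time (assumes the transformers BitConfig/BitBackbone API; the config values here are illustrative, not taken from this row):
import torch
from transformers import BitConfig, BitBackbone

config = BitConfig(hidden_sizes=[16, 32, 64, 128], depths=[1, 1, 2, 1], out_features=["stage2", "stage3"])
model = BitBackbone(config).eval()
with torch.no_grad():
    outputs = model(torch.rand(1, 3, 32, 32))  # (batch, channels, height, width)
# one feature map is returned per requested stage
assert len(outputs.feature_maps) == len(config.out_features)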
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = '''naver-clova-ix/donut-base'''
class DonutProcessorTest( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json(self ):
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json ) | 39 | 0 |
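A minimal usage sketch of the round trip asserted above (assumes network access to download the naver-clova-ix/donut-base checkpoint):
from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
# token2json parses the XML-like tag sequence back into a nested dict
sequence = "<s_name>John Doe</s_name><s_age>99</s_age>"
print(processor.token2json(sequence))  # {'name': 'John Doe', 'age': '99'}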
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments :
'''simple docstring'''
    model_name_or_path : str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir : Optional[str] = dataclasses.field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class STDataArguments :
'''simple docstring'''
    train_file : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file : Optional[str] = dataclasses.field(
        default=None , metadata={"help": "A csv or a json file containing the validation data."} )
    task_name : Optional[str] = dataclasses.field(
        default=None , metadata={"help": "The name of the task to train on."} , )
    label_list : Optional[List[str]] = dataclasses.field(
        default=None , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments :
'''simple docstring'''
    output_dir : str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric : Optional[str] = dataclasses.field(
        default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy : Optional[str] = dataclasses.field(
        default="no" , metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"
        } , )
    early_stopping_patience : Optional[int] = dataclasses.field(
        default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    early_stopping_threshold : Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        } , )
    do_filter_by_confidence : Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
    do_filter_by_val_performance : Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
    finetune_on_labeled_data : Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
    confidence_threshold : Optional[float] = dataclasses.field(
        default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    max_selftrain_iterations : Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    seed : Optional[int] = dataclasses.field(
        default=None , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data( args ,infer_input ,infer_output ,eval_result ,id2label ,next_data_dir ):
    """simple docstring"""
    dataset = datasets.concatenate_datasets([infer_input, infer_output] ,axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_top_rows = int(eval_result * len(dataset ) )
        print(num_top_rows )
        dataset = dataset.sort("probability" ,reverse=True )
        dataset = dataset.select(range(num_top_rows ) )
    dataset = dataset.remove_columns(["label", "probability"] )
    dataset = dataset.rename_column("prediction" ,"label" )
    dataset = dataset.map(lambda example : {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir ,F'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file ,index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def selftrain( model_name_or_path ,train_file ,infer_file ,output_dir ,**kwargs ):
    """simple docstring"""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,)
    logger.info(accelerator.state )
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file ,infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args ,key ,value )
    for key, value in kwargs.items():
        if hasattr(args ,key ):
            setattr(args ,key ,value )
# Sanity checks
    data_files = {}
    args.data_file_extension = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
        data_files["eval"] = args.eval_file
for key in data_files:
        extension = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
            args.data_file_extension = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
    data_dir_format = F'''{args.output_dir}/self-train_iter-{{}}'''.format
    initial_data_dir = data_dir_format(0 )
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir ,exist_ok=True )
            os.makedirs(initial_data_dir ,exist_ok=True )
accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations ) ,disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 ,int(args.max_selftrain_iterations ) ):
        current_data_dir = data_dir_format(iteration )
        assert os.path.exists(current_data_dir )
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir ,"stage-1" )
        arguments_dict = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args ,key ):
                arguments_dict.update({key: value} )
        model_bin_file_path = os.path.join(current_output_dir ,"best-checkpoint" ,MODEL_BIN_FILE )
        if os.path.exists(model_bin_file_path ):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." ,model_bin_file_path ,iteration ,)
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****" ,iteration )
            finetune(**arguments_dict )
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path )
            logger.info("Self-training job completed: iteration: %d, stage: 1." ,iteration )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir ,"best-checkpoint" )
            current_output_dir = os.path.join(current_data_dir ,"stage-2" )
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir ,"best-checkpoint" ,MODEL_BIN_FILE )
            if os.path.exists(model_bin_file_path ):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." ,model_bin_file_path ,iteration ,)
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****" ,iteration )
                finetune(**arguments_dict )
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path )
                logger.info("Self-training job completed: iteration: %d, stage: 2." ,iteration )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir ,"best-checkpoint" ) )
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir ,"eval_results_best-checkpoint.json" )
        test_results_file = os.path.join(current_output_dir ,"test_results_best-checkpoint.json" )
        assert os.path.exists(eval_results_file )
        with open(eval_results_file ,"r" ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
        infer_output_file = os.path.join(current_output_dir ,"infer_output_best-checkpoint.csv" )
        assert os.path.exists(infer_output_file )
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension ,data_files={"data": data_files["infer"]} )["data"]
        infer_output = load_dataset("csv" ,data_files={"data": infer_output_file} )["data"]
        if accelerator.is_main_process:
            os.makedirs(next_data_dir ,exist_ok=True )
            shutil.copy(eval_results_file ,os.path.join(output_dir ,F'''eval_results_iter-{iteration}.json''' ) )
            if os.path.exists(test_results_file ):
                shutil.copy(test_results_file ,os.path.join(output_dir ,F'''test_results_iter-{iteration}.json''' ) )
            create_pseudo_labeled_data(args ,infer_input ,infer_output ,eval_result ,id2label ,next_data_dir )
        accelerator.wait_for_everyone()
        data_files["train_pseudo"] = os.path.join(next_data_dir ,F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_UpperCamelCase : int = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
        logger.info("Best iteration: %d" ,best_iteration )
        logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,best_eval_result )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir ,F'''eval_results_iter-{iteration}.json''' ) ,os.path.join(output_dir ,"eval_results_best-iteration.json" ) ,)
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" ,args.max_selftrain_iterations - 1 )
        logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,eval_result )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir ,F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) ,os.path.join(output_dir ,"eval_results_best-iteration.json" ) ,)
| 624 |
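To make the pseudo-labeling step above concrete, a hedged, self-contained sketch of the confidence filter with the datasets library (toy in-memory data; the 0.9 threshold is illustrative):
from datasets import Dataset

preds = Dataset.from_dict({
    "text": ["a", "b", "c"],
    "prediction": [1, 0, 1],
    "probability": [0.95, 0.55, 0.80],
})
# keep only confidently pseudo-labeled rows, then promote predictions to labels
confident = preds.filter(lambda example: example["probability"] > 0.9)
confident = confident.remove_columns(["probability"]).rename_column("prediction", "label")
print(confident["label"])  # [1]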
from __future__ import annotations
def average(nums: list[float]) -> float:
    if not nums:
        raise ValueError('''List is empty''' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 0 |
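A quick usage check for the helper above:
assert average([1, 2, 3, 4]) == 2.5
assert average([5.0]) == 5.0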
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary tree node holding an integer and optional children."""
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    """Build the sample tree 1 -> (2 -> (4, 5), 3)."""
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left subtree, right subtree."""
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, root."""
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, right subtree."""
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None) -> int:
    """Height of the tree (number of levels)."""
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Any]:
    """Breadth-first traversal of the whole tree."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None , level: int) -> Sequence[Any]:
    """Collect the values of a single level, left to right."""
    output: list[Any] = []
    def populate_output(root: Node | None , level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root: Node | None , level: int) -> Sequence[Any]:
    """Collect the values of a single level, right to left."""
    output: list[Any] = []
    def populate_output(root: Node | None , level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root: Node | None) -> Sequence[Any]:
    """ZigZag (spiral) level-order traversal, alternating direction per level."""
    if root is None:
        return []
    output: list[Sequence[Any]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f'''In-order Traversal: {inorder(tree )}''' )
    print(f'''Pre-order Traversal: {preorder(tree )}''' )
    print(f'''Post-order Traversal: {postorder(tree )}''' , '''\n''' )
    print(f'''Height of Tree: {height(tree )}''' , '''\n''' )
    print('''Complete Level Order Traversal: ''' )
    print(level_order(tree ) , '''\n''' )
    print('''Level-wise order Traversal: ''' )
    for level in range(1 , height(tree ) + 1 ):
        print(f'''Level {level}:''' , get_nodes_from_left_to_right(tree , level=level ) )
    print('''\nZigZag order Traversal: ''' )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 404 |
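A worked check of the zigzag traversal above on the sample tree (1 with children 2 and 3; 2 with children 4 and 5):
tree = make_tree()
assert zigzag(tree) == [[1], [3, 2], [4, 5]]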
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
        from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
    def test_metric_cpu_multi(self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
    def test_metric_gpu(self ):
self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi(self ):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() ) | 39 | 0 |
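The patch_environment helper used above temporarily sets (upper-cased) environment variables and restores the previous state on exit; a hedged sketch of the pattern:
import os
from accelerate.utils import patch_environment

with patch_environment(omp_num_threads=1):
    assert os.environ["OMP_NUM_THREADS"] == "1"  # set inside the context
# the variable is restored or removed once the context exits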
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_A = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/diffusers/schedulers/scheduling_ddpm.py''' ), os.path.join(self.diffusers_dir, '''schedulers/scheduling_ddpm.py''' ), )
    def tearDown( self ):
        """simple docstring"""
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self, comment, class_name, class_code, overwrite_result=None ):
        """simple docstring"""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119 )
        code = black.format_str(code, mode=mode )
        fname = os.path.join(self.diffusers_dir, '''new_code.py''' )
        with open(fname, '''w''', newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True )
            with open(fname, '''r''' ) as f:
                self.assertTrue(f.read(), expected )
    def test_find_code_in_diffusers( self ):
        """simple docstring"""
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code, REFERENCE_CODE )
    def test_copy_consistency( self ):
        """simple docstring"""
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''', '''DDPMSchedulerOutput''', REFERENCE_CODE + '''\n''', )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''', '''DDPMSchedulerOutput''', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''', '''TestSchedulerOutput''', re.sub('''DDPM''', '''Test''', REFERENCE_CODE ), )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub('''Bert''', long_class_name, REFERENCE_CODE ), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''', '''TestSchedulerOutput''', REFERENCE_CODE, overwrite_result=re.sub('''DDPM''', '''Test''', REFERENCE_CODE ), )
| 431 |
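For reference, the black call pattern the test above relies on, as a standalone hedged sketch:
import black

mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
print(black.format_str("x=1", mode=mode))  # 'x = 1\n'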
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__( self , prediction_length : Optional[int] = None , context_length : Optional[int] = None , distribution_output : str = "student_t" , loss : str = "nll" , input_size : int = 1 , lags_sequence : List[int] = None , scaling : Optional[Union[str, bool]] = "mean" , num_dynamic_real_features : int = 0 , num_static_categorical_features : int = 0 , num_static_real_features : int = 0 , num_time_features : int = 0 , cardinality : Optional[List[int]] = None , embedding_dimension : Optional[List[int]] = None , d_model : int = 6_4 , encoder_ffn_dim : int = 3_2 , decoder_ffn_dim : int = 3_2 , encoder_attention_heads : int = 2 , decoder_attention_heads : int = 2 , encoder_layers : int = 2 , decoder_layers : int = 2 , is_encoder_decoder : bool = True , activation_function : str = "gelu" , dropout : float = 0.05 , encoder_layerdrop : float = 0.1 , decoder_layerdrop : float = 0.1 , attention_dropout : float = 0.1 , activation_dropout : float = 0.1 , num_parallel_samples : int = 1_0_0 , init_std : float = 0.02 , use_cache : bool = True , attention_type : str = "prob" , sampling_factor : int = 5 , distil : bool = True , **kwargs , ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 39 | 0 |
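A hedged sketch of how _number_of_features above composes into the transformer input size (assumes the attribute is exposed as feature_size, as in the related time-series configs; values are illustrative):
from transformers import InformerConfig

config = InformerConfig(
    prediction_length=12,
    num_static_categorical_features=1,
    cardinality=[10],
    embedding_dimension=[3],
    num_time_features=2,
)
# feature_size = input_size * len(lags_sequence) + _number_of_features
# = 1 * 7 + (3 + 0 + 2 + 0 + 1 * 2) = 14
print(config.feature_size)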
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open # noqa: we just need to have a builtin inside this module to test it properly
| 51 |
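A hedged sketch of how datasets' patch_submodule exercises the module above (assumes the test helper's context-manager API; the patched callable is a hypothetical stand-in):
from datasets.utils.patching import patch_submodule
import _test_patching  # the module above

with patch_submodule(_test_patching, "os.path.join", lambda *args: "/patched"):
    assert _test_patching.path.join("a", "b") == "/patched"  # the `from os import path` alias is patched too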
import cmath
import math
def apparent_power(voltage: float , current: float , voltage_angle: float , current_angle: float ) -> complex:
    # Convert angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle )
    current_angle_rad = math.radians(current_angle )
    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage , voltage_angle_rad )
    current_rect = cmath.rect(current , current_angle_rad )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 0 |
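A worked example for apparent_power above: 100 V at 0 degrees with 5 A at 90 degrees yields a purely reactive result (note the textbook definition of complex power uses the conjugate of the current; this helper multiplies directly, matching its source):
power = apparent_power(100, 5, 0, 90)
print(round(power.real, 6), round(power.imag, 6))  # ~0.0 500.0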
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None ):
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str ) -> str:
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )
    def _do_extract(self, output_path: str , force_extract: bool ) -> bool:
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )
    def extract(self, input_path: str , force_extract: bool = False ) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class BaseExtractor(ABC ):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs ) -> bool:
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path, output_path ) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor , ABC ):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number(path, magic_number_length: int ):
        with open(path , """rb""" ) as f:
            return f.read(magic_number_length )
    @classmethod
    def is_extractable(cls, path, magic_number: bytes = b"" ) -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor ):
    @classmethod
    def is_extractable(cls, path, **kwargs ) -> bool:
        return tarfile.is_tarfile(path )
    @staticmethod
    def safemembers(members, output_path ):
        def resolved(path ) -> str:
            return os.path.realpath(os.path.abspath(path ) )
        def badpath(path , base ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )
        def badlink(info , base ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )
        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
            else:
                yield finfo
    @staticmethod
    def extract(input_path, output_path ):
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x1F\x8B"]
    @staticmethod
    def extract(input_path, output_path ):
        with gzip.open(input_path , """rb""" ) as gzip_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path, magic_number: bytes = b"" ) -> bool:
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path , """rb""" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path, output_path ):
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , """r""" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
    @staticmethod
    def extract(input_path, output_path ):
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID
    @staticmethod
    def extract(input_path, output_path ):
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile
        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]
    @staticmethod
    def extract(input_path, output_path ):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path , """rb""" ) as ifh, open(output_path , """wb""" ) as ofh:
            dctx.copy_stream(ifh , ofh )
class Bzip2Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x42\x5A\x68"]
    @staticmethod
    def extract(input_path, output_path ):
        with bz2.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
    @staticmethod
    def extract(input_path, output_path ):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , """r""" ) as archive:
            archive.extractall(output_path )
class Lz4Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x04\x22\x4D\x18"]
    @staticmethod
    def extract(input_path, output_path ):
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls ):
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number(path, magic_number_length: int ):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path, return_extractor: bool = False ):
        warnings.warn(
            """Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use \'infer_extractor_format\' instead.""" , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path ):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format
    @classmethod
    def extract(cls, input_path, output_path, extractor_format: Optional[str] = None , extractor = "deprecated" , ):
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        """Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use \'extractor_format\' instead.""" , category=FutureWarning , )
                    extractor = extractor if extractor != """deprecated""" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    """Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""" , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
| 643 |
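The module above mirrors datasets' extraction utilities; a hedged usage sketch against the real library (module path as in the datasets source tree; the archive must exist so its magic bytes can be read):
from datasets.utils.extract import Extractor

fmt = Extractor.infer_extractor_format("archive.tar.gz")  # inspects leading magic bytes
if fmt:
    Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)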
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = BioGptForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past(self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past = model(input_ids , attention_mask=attn_mask ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,) , half_seq_length ).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device )] , dim=1 , )
        # get two different outputs
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past , attention_mask=attn_mask )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def create_and_check_biogpt_model_past_large_inputs(self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = BioGptModel(config=config ).to(torch_device ).eval()
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        # first forward pass
        outputs = model(input_ids , attention_mask=attn_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        attn_mask = torch.cat([attn_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask )['''last_hidden_state''']
        output_from_past = model(next_tokens , attention_mask=attn_mask , past_key_values=past_key_values )[
            '''last_hidden_state'''
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def create_and_check_forward_and_backwards(self , config , input_ids , input_mask , head_mask , token_type_ids , *args , gradient_checkpointing=False ):
        model = BioGptForCausalLM(config )
        model.to(torch_device )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self , config , *args ):
        model = BioGptModel(config )
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
    def create_and_check_biogpt_for_token_classification(self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = BioGptModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->Optional[Any]:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        tokenizer.padding_side = '''left'''
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
snake_case_ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''pt''' , padding=_UpperCamelCase )
snake_case_ = inputs['''input_ids'''].to(_UpperCamelCase )
snake_case_ = model.generate(
input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(_UpperCamelCase ) , )
snake_case_ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase )
snake_case_ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
snake_case_ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__( self : Optional[int] ) ->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = '''multi_label_classification'''
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : int ) ->Any:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
snake_case_ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 4_2_3_8_4
snake_case_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
snake_case_ = model.generate(
**_UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCamelCase , )
snake_case_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph: dict, v: str, visited_forward: set, visited_backward: set, cst_fwd: dict, cst_bwd: dict, queue: PriorityQueue, parent: dict, shortest_distance: float, ) -> float:
    # Relax all outgoing edges of v for one search direction and update the best
    # forward + backward meeting distance seen so far.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    # Bi-directional Dijkstra: run one search from the source and one from the
    # destination, stopping once the two frontiers can no longer improve the answer.
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
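
    # Illustrative usage (assumes the demo graphs above): the shortest E -> F
    # route is E -> G -> F with total weight 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3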
| 699 |
def trapezoidal_rule(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # interior sample points a+h, a+2h, ... strictly below b-h
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
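
# Convergence sketch (assumes the f and trapezoidal_rule defined above): the
# composite trapezoidal rule has O(h^2) error, so doubling `steps` should roughly
# quarter the error against the exact integral of x**2 over [0, 1], which is 1/3.
def demo_convergence():
    for steps in (10.0, 20.0, 40.0):
        estimate = trapezoidal_rule([0.0, 1.0], steps)
        print(f"steps={steps:5.0f}  estimate={estimate:.6f}  error={abs(estimate - 1 / 3):.6f}")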
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main() | 39 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    # Advance the digit array a_i (least-significant digit first) towards the
    # n-th term, reusing cached "jumps" keyed by digitsum(b) and c, where the
    # current value is written as b * 10^k + c.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    # Walk the sequence term by term until the n-th term is reached or the
    # low-order block c overflows into the b part.
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # adds addend to the digit array, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    # returns the n-th term of the digit-sum sequence starting at a(1) = 1
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
return a_n
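
# Quick sanity check (sketch): with a(1) = 1 the sequence runs
# 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ..., so solution(10) should give 62.
def sanity_check():
    print(solution(10))  # expected: 62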
if __name__ == "__main__":
print(f'''{solution() = }''')
| 36 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    # Formats a user-agent string with basic info about the running environment.
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
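
# Example (illustrative values only): with torch installed the returned string
# looks like "diffusers/<version>; python/3.10.11; session_id/<hex>; torch/2.0.1",
# and passing user_agent={"pipeline": "text-to-image"} appends "; pipeline/text-to-image".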
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ), adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None, ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision, )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: str, commit_hash: Optional[str] = None):
    # Extracts the commit hash from a resolved filename pointing into the cache.
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
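
# Example: a file resolved to ".../diffusers/<repo>/snapshots/<commit>/unet/config.json"
# yields "<commit>" when it looks like a full commit hash; otherwise None is returned.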
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
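
# Example: _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin"; with variant=None the name is returned unchanged.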
def _get_model_file(pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`." )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F'''containing a file named {weights_name}''' ) | 39 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = None, scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_real_features: int = 0, num_static_categorical_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", dropout: float = 0.05, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, attention_type: str = "prob", sampling_factor: int = 5, distil: bool = True, **kwargs, ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
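
# Minimal usage sketch (illustrative values, not a tuned configuration):
#   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   config.feature_size  # == input_size * len(lags_sequence) + config._number_of_features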
| 92 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                pass
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
return output | 39 | 0 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """Text decoder for UniDiffuser: a GPT-2 LM head conditioned on a projected prefix embedding."""

    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False, ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal." )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        # Generate one caption per CLIP feature via beam search.
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(self, input_ids=None, input_embeds=None, device=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None, ):
        # Generates text until the stop token is reached, using beam search.
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
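
# Minimal usage sketch (hypothetical sizes, not a trained setup): project a 768-d
# CLIP feature through a 77-token prefix and beam-search a caption:
#   decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)
#   tokens, lengths = decoder.generate_captions(clip_features, eos_token_id=50256, device="cpu")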
| 26 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to what data we are going to input our model for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."} )


@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    # Merge the inference inputs with the model's predictions, filter for
    # confident rows, and write the result out as the next pseudo-labeled
    # training set.
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
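
# Illustrative flow (column names as assumed by the filters above): `infer_output`
# carries "prediction" and "probability" columns; confident rows are relabeled via
# `id2label` and written out as train_pseudo.<ext> for the next self-training round.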
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
snake_case_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case_ = STModelArguments(model_name_or_path=SCREAMING_SNAKE_CASE__ )
snake_case_ = STDataArguments(train_file=SCREAMING_SNAKE_CASE__ , infer_file=SCREAMING_SNAKE_CASE__ )
snake_case_ = STTrainingArguments(output_dir=SCREAMING_SNAKE_CASE__ )
snake_case_ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(SCREAMING_SNAKE_CASE__ ).items():
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for key, value in kwargs.items():
if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Sanity checks
snake_case_ = {}
snake_case_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case_ = args.train_file
snake_case_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case_ = args.eval_file
for key in data_files:
snake_case_ = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
snake_case_ = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
snake_case_ = F'''{args.output_dir}/self-train_iter-{{}}'''.format
snake_case_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = None
snake_case_ = None
snake_case_ = 0
snake_case_ = False
# Show the progress bar
snake_case_ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case_ = data_dir_format(SCREAMING_SNAKE_CASE__ )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-1''' )
snake_case_ = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
arguments_dict.update({key: value} )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-2''' )
# Update arguments_dict
snake_case_ = model_path
snake_case_ = data_files['''train''']
snake_case_ = current_output_dir
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ )
snake_case_ = iteration
snake_case_ = data_dir_format(iteration + 1 )
snake_case_ = AutoConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' ) )
snake_case_ = config.idalabel
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-checkpoint.json''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
snake_case_ = float(json.load(SCREAMING_SNAKE_CASE__ )[args.eval_metric] )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Loading the dataset from local csv or json files.
snake_case_ = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
snake_case_ = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case_ = eval_result
if best_iteration is None:
snake_case_ = new_iteration
snake_case_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case_ = new_iteration
snake_case_ = new_eval_result
snake_case_ = 0
else:
if new_eval_result == best_eval_result:
snake_case_ = new_iteration
snake_case_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , SCREAMING_SNAKE_CASE__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , ) | 39 | 0 |
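# A minimal, self-contained sketch of the early-stopping bookkeeping in the
# self-training loop above. The names (`results`, `patience`, `threshold`) are
# hypothetical; the script itself tracks the same state in best_iteration,
# best_eval_result and early_stopping_patience_counter.
def select_best_iteration(results, patience=3, threshold=0.0):
    best_iter, best_result, counter = None, None, 0
    for iteration, result in enumerate(results):
        if best_result is None or result - best_result > threshold:
            # A strict improvement beyond the threshold resets the patience counter.
            best_iter, best_result, counter = iteration, result, 0
        else:
            if result == best_result:
                # Ties still move the best iteration forward, as in the loop above.
                best_iter, best_result = iteration, result
            counter += 1
        if counter >= patience:
            break
    return best_iter, best_result

assert select_best_iteration([0.70, 0.75, 0.74, 0.74, 0.74, 0.74]) == (1, 0.75)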
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__SCREAMING_SNAKE_CASE = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__SCREAMING_SNAKE_CASE = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
__SCREAMING_SNAKE_CASE = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__SCREAMING_SNAKE_CASE = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__SCREAMING_SNAKE_CASE = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any =re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' ,SCREAMING_SNAKE_CASE__ )
return [m.group(0 ) for m in matches]
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] =transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ : Tuple ={
config.replace('Config' ,'' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
SCREAMING_SNAKE_CASE_ : List[str] =collections.defaultdict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =collections.defaultdict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : Tuple =collections.defaultdict(SCREAMING_SNAKE_CASE__ )
# Let's look through all transformers objects (once) and find if models are supported by a given backend.
for attr_name in dir(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE_ : Any =None
if _re_tf_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] =tf_models
SCREAMING_SNAKE_CASE_ : Any =_re_tf_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
elif _re_flax_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =flax_models
SCREAMING_SNAKE_CASE_ : int =_re_flax_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
elif _re_pt_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
SCREAMING_SNAKE_CASE_ : Any =pt_models
SCREAMING_SNAKE_CASE_ : Optional[Any] =_re_pt_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
if lookup_dict is not None:
while len(SCREAMING_SNAKE_CASE__ ) > 0:
if attr_name in model_prefix_to_model_type:
SCREAMING_SNAKE_CASE_ : Any =True
break
# Try again after removing the last word in the name
SCREAMING_SNAKE_CASE_ : Optional[Any] =''.join(camel_case_split(SCREAMING_SNAKE_CASE__ )[:-1] )
SCREAMING_SNAKE_CASE_ : str =set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
SCREAMING_SNAKE_CASE_ : str =list(SCREAMING_SNAKE_CASE__ )
all_models.sort()
SCREAMING_SNAKE_CASE_ : str ={'model_type': all_models}
SCREAMING_SNAKE_CASE_ : Any =[pt_models[t] for t in all_models]
SCREAMING_SNAKE_CASE_ : List[str] =[tf_models[t] for t in all_models]
SCREAMING_SNAKE_CASE_ : Any =[flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure each model type gets a default processor class
SCREAMING_SNAKE_CASE_ : Any ={}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
SCREAMING_SNAKE_CASE_ : List[Any] ='AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
SCREAMING_SNAKE_CASE_ : Any ='AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
SCREAMING_SNAKE_CASE_ : List[Any] ='AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
SCREAMING_SNAKE_CASE_ : Dict ='AutoTokenizer'
SCREAMING_SNAKE_CASE_ : List[Any] =[processors[t] for t in all_models]
return pd.DataFrame(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =[
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
SCREAMING_SNAKE_CASE_ : Dict =[model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
SCREAMING_SNAKE_CASE_ : List[Any] =[auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
# The type of pipeline may not exist in this framework
if not hasattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
continue
# First extract all model_names
SCREAMING_SNAKE_CASE_ : str =[]
for name in getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).values():
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
model_names.append(SCREAMING_SNAKE_CASE__ )
else:
model_names.extend(list(SCREAMING_SNAKE_CASE__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Dict ,lowerCAmelCase_ : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict =get_frameworks_table()
SCREAMING_SNAKE_CASE_ : List[Any] =Dataset.from_pandas(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : List[Any] =hf_hub_download(
'huggingface/transformers-metadata' ,'pipeline_tags.json' ,repo_type='dataset' ,token=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : List[Any] =Dataset.from_json(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] ={
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(SCREAMING_SNAKE_CASE__ ) )
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] =update_pipeline_and_auto_class_table(SCREAMING_SNAKE_CASE__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
SCREAMING_SNAKE_CASE_ : List[str] =sorted(table.keys() )
SCREAMING_SNAKE_CASE_ : int =pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
SCREAMING_SNAKE_CASE_ : List[str] =Dataset.from_pandas(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(SCREAMING_SNAKE_CASE__ ,'frameworks.json' ) )
tags_dataset.to_json(os.path.join(SCREAMING_SNAKE_CASE__ ,'pipeline_tags.json' ) )
if commit_sha is not None:
SCREAMING_SNAKE_CASE_ : List[Any] =(
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
SCREAMING_SNAKE_CASE_ : List[Any] ='Update'
upload_folder(
repo_id='huggingface/transformers-metadata' ,folder_path=SCREAMING_SNAKE_CASE__ ,repo_type='dataset' ,token=SCREAMING_SNAKE_CASE__ ,commit_message=SCREAMING_SNAKE_CASE__ ,)
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any ={tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
SCREAMING_SNAKE_CASE_ : Dict =transformers_module.pipelines.SUPPORTED_TASKS
SCREAMING_SNAKE_CASE_ : List[str] =[]
for key in pipeline_tasks:
if key not in in_table:
SCREAMING_SNAKE_CASE_ : Optional[int] =pipeline_tasks[key]['pt']
if isinstance(SCREAMING_SNAKE_CASE__ ,(list, tuple) ):
SCREAMING_SNAKE_CASE_ : Dict =model[0]
SCREAMING_SNAKE_CASE_ : List[str] =model.__name__
if model not in in_table.values():
missing.append(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
SCREAMING_SNAKE_CASE_ : Any =', '.join(SCREAMING_SNAKE_CASE__ )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
__SCREAMING_SNAKE_CASE = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 220 |
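# A quick, runnable check of the camel-case splitting regex used in the row
# above; the helper strips trailing words off a class name until a known model
# prefix is found.
import re

_camel = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")

def camel_case_split(identifier):
    return [m.group(0) for m in _camel.finditer(identifier)]

assert camel_case_split("TFBertForMaskedLM") == ["TF", "Bert", "For", "Masked", "LM"]
# Dropping the last word, as the backend lookup above does on each retry:
assert "".join(camel_case_split("TFBertForMaskedLM")[:-1]) == "TFBertForMasked"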
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AltDiffusionPipeline
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__( self : Dict ) ->int:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
snake_case_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
snake_case_ = CLIPTextModel(_UpperCamelCase )
snake_case_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
snake_case_ = 7_7
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=0 ) ->Any:
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__( self : Dict ) ->List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__( self : List[str] ) ->Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__( self : Dict ) ->Any:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = '''A photo of an astronaut'''
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : int ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : List[str] ) ->Tuple:
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , num_inference_steps=2 , output_type='''numpy''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 39 | 0 |
"""simple docstring"""
def UpperCAmelCase_ ( __a : str , __a : str ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_lowerCamelCase : List[str] = str(bin(SCREAMING_SNAKE_CASE__ ) )[2:] # remove the leading "0b"
_lowerCamelCase : int = str(bin(SCREAMING_SNAKE_CASE__ ) )[2:]
_lowerCamelCase : List[str] = max(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
return "0b" + "".join(
str(int('1' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE__ ) , b_binary.zfill(SCREAMING_SNAKE_CASE__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 437 |
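# Usage sketch for the string-based OR above: for non-negative integers it
# agrees with Python's built-in bitwise operator.
def binary_or_builtin(a, b):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    return bin(a | b)

assert binary_or_builtin(25, 32) == "0b111001"  # 0b11001 | 0b100000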
from math import factorial
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If either of the conditions is true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(SCREAMING_SNAKE_CASE__ ) // (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
) | 39 | 0 |
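# The helper above computes the binomial coefficient C(n, k) = n! / (k! * (n - k)!).
# Since Python 3.8 the standard library exposes the same value directly, which
# makes a convenient cross-check:
from math import comb

assert comb(52, 5) == 2598960  # matches the five-card-hand count printed above
assert comb(40, 4) == 91390
assert comb(10, 3) == 120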
"""simple docstring"""
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "pytorch_model.bin.index.json"
lowerCamelCase__ = "adapter_config.json"
lowerCamelCase__ = "adapter_model.bin"
lowerCamelCase__ = "adapter_model.safetensors"
lowerCamelCase__ = "tf_model.h5"
lowerCamelCase__ = "tf_model.h5.index.json"
lowerCamelCase__ = "model.ckpt"
lowerCamelCase__ = "flax_model.msgpack"
lowerCamelCase__ = "flax_model.msgpack.index.json"
lowerCamelCase__ = "model.safetensors"
lowerCamelCase__ = "model.safetensors.index.json"
lowerCamelCase__ = "config.json"
lowerCamelCase__ = "preprocessor_config.json"
lowerCamelCase__ = FEATURE_EXTRACTOR_NAME
lowerCamelCase__ = "generation_config.json"
lowerCamelCase__ = "modelcard.json"
lowerCamelCase__ = "▁"
lowerCamelCase__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase__ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if version.parse(SCREAMING_SNAKE_CASE__ ) < version.parse(SCREAMING_SNAKE_CASE__ ):
if "dev" in min_version:
_UpperCamelCase : Optional[Any] = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
_UpperCamelCase : Optional[int] = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers." )
| 624 |
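# Minimal sketch of the version guard implemented above, using
# packaging.version directly; the version strings here are illustrative.
from packaging import version

def needs_upgrade(installed, minimum):
    # Dev releases sort before their final release, exactly as in the check above.
    return version.parse(installed) < version.parse(minimum)

assert needs_upgrade("4.20.0", "4.21.0.dev0") is True
assert needs_upgrade("4.21.0", "4.21.0.dev0") is False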
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : bool
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 42
SCREAMING_SNAKE_CASE : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : Optional[bool] = None
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "titi"
SCREAMING_SNAKE_CASE : Any = "toto"
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = "titi"
SCREAMING_SNAKE_CASE : Optional[Any] = "toto"
SCREAMING_SNAKE_CASE : Any = 42
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : BasicEnum = "toto"
def snake_case__( self : Tuple ) ->List[str]:
snake_case_ = BasicEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto"
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] )
SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] )
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = field()
SCREAMING_SNAKE_CASE : str = field()
SCREAMING_SNAKE_CASE : BasicEnum = field()
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : "BasicEnum" = field()
SCREAMING_SNAKE_CASE : "Optional[bool]" = None
SCREAMING_SNAKE_CASE : "str" = field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : bool | None = None
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int | None = None
SCREAMING_SNAKE_CASE : float | None = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : str | None = None
SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] )
SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((snake_case_), ) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__( self : Tuple ) ->Union[str, Any]:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
snake_case_ = parser.parse_dict(_UpperCamelCase )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : int ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
def snake_case__( self : str ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_json''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Any ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase ) | 39 | 0 |
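# Minimal usage sketch of HfArgumentParser with a dataclass, mirroring what the
# tests above exercise; the field names are illustrative.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ExampleArguments:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})

parser = HfArgumentParser(ExampleArguments)
(example,) = parser.parse_args_into_dataclasses(["--foo", "12"])
assert example.foo == 12 and example.baz == "toto"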
'''simple docstring'''
from __future__ import annotations
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =[]
_UpperCamelCase =[]
_UpperCamelCase =0
_UpperCamelCase =sum(SCREAMING_SNAKE_CASE__ )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return result
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE__ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE__ )
return
for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , )
__lowerCamelCase : Optional[int] = [3, 34, 4, 12, 5, 2]
__lowerCamelCase : Dict = 9
__lowerCamelCase : str = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 404 |
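# A compact itertools-based cross-check for the backtracking solver above; it
# enumerates every subset, so it is only practical for small inputs.
from itertools import combinations

def subsets_with_sum(nums, target):
    return [
        list(combo)
        for size in range(len(nums) + 1)
        for combo in combinations(nums, size)
        if sum(combo) == target
    ]

assert subsets_with_sum([3, 34, 4, 12, 5, 2], 9) == [[4, 5], [3, 4, 2]]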
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Dict , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 39 | 0 |
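# Migration note for the deprecation shim above: new code should construct the
# image processor directly rather than going through the feature extractor.
from transformers import ChineseCLIPImageProcessor

image_processor = ChineseCLIPImageProcessor()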
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
_A = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
_A = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = ''' Hello world! cécé herlolip'''
_A = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = dct.pop(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = val
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
lowerCAmelCase_ = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
hub_interface.model.load_state_dict(sd['''model'''] )
return hub_interface
def __UpperCamelCase ( _A ):
lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape
lowerCAmelCase_ = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = emb.weight.data
return lin_layer
@torch.no_grad()
def __UpperCamelCase ( _A , _A , _A=None ):
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase_ = torch.hub.load('''pytorch/fairseq''' , SCREAMING_SNAKE_CASE__ ).eval()
else:
lowerCAmelCase_ = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
lowerCAmelCase_ = checkpoint_path.replace('''.''' , '''-''' )
lowerCAmelCase_ = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
lowerCAmelCase_ = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).unsqueeze(0 )
if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all():
raise ValueError(
f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" )
if checkpoint_path == "bart.large.mnli":
lowerCAmelCase_ = bart.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = state_dict['''model.decoder.embed_tokens.weight''']
for src, dest in mnli_rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = bart.predict('''mnli''' , SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = model(SCREAMING_SNAKE_CASE__ )[0] # logits
else: # no classification heads to worry about
lowerCAmelCase_ = bart.model.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = state_dict['''decoder.embed_tokens.weight''']
lowerCAmelCase_ = bart.extract_features(SCREAMING_SNAKE_CASE__ )
if hf_checkpoint_name == "facebook/bart-large":
lowerCAmelCase_ = BartModel(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ = model(SCREAMING_SNAKE_CASE__ ).model[0]
else:
lowerCAmelCase_ = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__ , '''lm_head''' ):
lowerCAmelCase_ = make_linear_from_emb(model.model.shared )
lowerCAmelCase_ = model.model(SCREAMING_SNAKE_CASE__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
_A = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 431 |
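# Example invocation of the conversion script above (the script filename and
# output path are placeholders):
#
#   python convert_bart_checkpoint.py bart.large.cnn /tmp/bart-large-cnn \
#       --hf_config facebook/bart-large-cnn
#
# The first positional argument can be a fairseq hub name (bart.large,
# bart.large.cnn) or a path to a local model.pt checkpoint.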
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "rwkv"
SCREAMING_SNAKE_CASE : Any = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , _UpperCamelCase : Any=5_0_2_7_7 , _UpperCamelCase : Optional[int]=1_0_2_4 , _UpperCamelCase : Optional[int]=4_0_9_6 , _UpperCamelCase : str=3_2 , _UpperCamelCase : Tuple=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=1e-5 , _UpperCamelCase : Any=0 , _UpperCamelCase : Optional[Any]=0 , _UpperCamelCase : int=6 , _UpperCamelCase : Dict=False , _UpperCamelCase : Optional[int]=True , **_UpperCamelCase : int , ) ->List[str]:
snake_case_ = vocab_size
snake_case_ = context_length
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
snake_case_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
snake_case_ = layer_norm_epsilon
snake_case_ = rescale_every
snake_case_ = use_cache
snake_case_ = bos_token_id
snake_case_ = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 39 | 0 |
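# Minimal usage sketch, assuming the class above is the upstream RwkvConfig;
# the derived sizes default from hidden_size exactly as coded in __init__.
from transformers import RwkvConfig

config = RwkvConfig()
assert config.attention_hidden_size == config.hidden_size
assert config.intermediate_size == 4 * config.hidden_size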
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a__ : Optional[Any] = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 |
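# Design note on the row above: the module publishes an `_import_structure`
# mapping and hands it to `_LazyModule`, so importing the package stays cheap
# and the heavy torch/vision imports only run when an attribute is first
# accessed (the TYPE_CHECKING branch keeps static analyzers aware of the real
# symbols).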
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
class snake_case_ :
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : Optional[str] = None ) ->Tuple:
snake_case_ = (
os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case_ = Extractor
def snake_case__( self : Any , _UpperCamelCase : str ) ->str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
snake_case_ = os.path.abspath(_UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) )
def snake_case__( self : int , _UpperCamelCase : str , _UpperCamelCase : bool ) ->bool:
return force_extract or (
not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ))
)
def snake_case__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : bool = False ) ->str:
snake_case_ = self.extractor.infer_extractor_format(_UpperCamelCase )
if not extractor_format:
return input_path
snake_case_ = self._get_output_path(_UpperCamelCase )
if self._do_extract(_UpperCamelCase , _UpperCamelCase ):
self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return output_path
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
@abstractmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : str ) ->bool:
...
@staticmethod
@abstractmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
...
class snake_case_ ( __A , __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[bytes] = []
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->List[Any]:
with open(_UpperCamelCase , '''rb''' ) as f:
return f.read(_UpperCamelCase )
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if not magic_number:
snake_case_ = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
snake_case_ = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : Any ) ->bool:
return tarfile.is_tarfile(_UpperCamelCase )
@staticmethod
def snake_case__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) ->List[str]:
def resolved(_UpperCamelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_UpperCamelCase ) )
def badpath(_UpperCamelCase : str , _UpperCamelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_UpperCamelCase , _UpperCamelCase ) ).startswith(_UpperCamelCase )
def badlink(_UpperCamelCase : Tuple , _UpperCamelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case_ = resolved(os.path.join(_UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_UpperCamelCase )
snake_case_ = resolved(_UpperCamelCase )
for finfo in members:
if badpath(finfo.name , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) )
tar_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [b"\x1F\x8B"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with gzip.open(_UpperCamelCase , '''rb''' ) as gzip_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def snake_case__( cls : List[str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return True
try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCamelCase , '''rb''' ) as fp:
snake_case_ = _EndRecData(_UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case_ = fp.read(_UpperCamelCase ) # CD is where we expect it to be
if len(_UpperCamelCase ) == sizeCentralDir:
snake_case_ = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with zipfile.ZipFile(_UpperCamelCase , '''r''' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with lzma.open(_UpperCamelCase ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = rarfile.RarFile(_UpperCamelCase )
rf.extractall(_UpperCamelCase )
rf.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
snake_case_ = zstd.ZstdDecompressor()
with open(_UpperCamelCase , '''rb''' ) as ifh, open(_UpperCamelCase , '''wb''' ) as ofh:
dctx.copy_stream(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"\x42\x5A\x68"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
        with bz2.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
        import py7zr
        os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
        with py7zr.SevenZipFile(_UpperCamelCase , '''r''' ) as archive:
archive.extractall(_UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x04\x22\x4D\x18"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case__( cls : List[Any] ) ->List[str]:
return max(
len(_UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCamelCase , _UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->Tuple:
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase )
except OSError:
return b""
@classmethod
def snake_case__( cls : Optional[Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bool = False ) ->bool:
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = cls.infer_extractor_format(_UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case__( cls : int , _UpperCamelCase : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
snake_case_ = cls._get_magic_number_max_length()
snake_case_ = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return extractor_format
@classmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ) ->None:
os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase )
# Prevent parallel extractions
snake_case_ = str(Path(_UpperCamelCase ).with_suffix('''.lock''' ) )
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = extractor if extractor != '''deprecated''' else extractor_format
else:
snake_case_ = cls.extractors[extractor_format]
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCamelCase ):
return extractor.extract(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
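# The extractors above dispatch on file "magic numbers" (signature bytes at the
# start of a file). A self-contained sketch of that detection idea, using only
# the well-known signatures already listed in the classes above:
MAGIC_NUMBERS = {
    "gzip": [b"\x1F\x8B"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "xz": [b"\xFD\x37\x7A\x58\x5A\x00"],
    "zstd": [b"\x28\xB5\x2F\xFD"],
    "bz2": [b"\x42\x5A\x68"],
    "7z": [b"\x37\x7A\xBC\xAF\x27\x1C"],
}

def sniff_format(path):
    """Return the first format whose signature matches the file header, else None."""
    max_len = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        header = f.read(max_len)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(header.startswith(m) for m in magics):
            return fmt
    return None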
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'fifty-two card deck is: {combinations(52, 5)}\n',
)
print(
'If a class of 40 students must be arranged into groups of',
F'4 for group projects, there are {combinations(40, 4)} ways',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'are {combinations(10, 3)} ways that first, second and',
'third place can be awarded.',
)
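# Sanity check for combinations() above: on Python 3.8+, math.comb computes the
# same value in one call.
from math import comb

assert combinations(52, 5) == comb(52, 5) == 2_598_960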
| 643 |
def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('''Sequence must be list of non-negative integers''' )
    for _ in range(len(sequence) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 39 | 0 |
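# A quick randomized check of bead_sort against the built-in sorted(); bead
# sort only applies to lists of non-negative integers.
import random

for _ in range(100):
    data = [random.randint(0, 50) for _ in range(random.randint(0, 20))]
    assert bead_sort(list(data)) == sorted(data)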
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = XGLMTokenizerFast
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : List[Any] = True
def UpperCamelCase ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
A = XGLMTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : Any ):
A = '<pad>'
A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def UpperCamelCase ( self : Optional[int] ):
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(_UpperCamelCase ) , 1008 )
def UpperCamelCase ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def UpperCamelCase ( self : int ):
A = XGLMTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
A = tokenizer.tokenize('This is a test' )
self.assertListEqual(_UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCamelCase ( self : Any ):
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def UpperCamelCase ( self : int ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_UpperCamelCase , f.name )
A = XGLMTokenizer(f.name , keep_accents=_UpperCamelCase )
A = pickle.dumps(_UpperCamelCase )
pickle.loads(_UpperCamelCase )
def UpperCamelCase ( self : Any ):
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = 'I was born in 92000, and this is falsé.'
A = tokenizer.tokenize(_UpperCamelCase )
A = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
A = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
A = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
A = self.get_rust_tokenizer()
A = tokenizer.encode(_UpperCamelCase )
A = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def UpperCamelCase ( self : List[str] ):
A = 'Hello World!'
A = [2, 31227, 4447, 35]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def UpperCamelCase ( self : Union[str, Any] ):
A = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
A = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def UpperCamelCase ( self : List[Any] ):
# fmt: off
A = {
'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='facebook/xglm-564M' , padding=_UpperCamelCase , )
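# The assertions above shift raw SentencePiece ids by `tokenizer.fairseq_offset`
# because XGLM reserves the lowest ids for special tokens. A minimal sketch of
# that id mapping (the offset value here is hypothetical, for illustration):
FAIRSEQ_OFFSET = 1

def sp_to_model_id(sp_id, offset=FAIRSEQ_OFFSET):
    return sp_id + offset

def model_to_sp_id(model_id, offset=FAIRSEQ_OFFSET):
    return model_id - offset

assert model_to_sp_id(sp_to_model_id(285)) == 285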
| 699 |
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('''<n>''' , '''''' , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) ) | 39 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    '''simple docstring'''
    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features , num_examples=100 , seq_shapes=None ):
    '''simple docstring'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = """The small grey turtle was surprisingly fast when challenged."""
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset(dataset_path , features , num_examples=100 , seq_shapes=None ):
    '''simple docstring'''
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples , num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
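# Usage sketch for the get_duration decorator above: the wrapped call returns
# elapsed wall-clock seconds rather than the function's own result.
@get_duration
def _busy_loop(n=100_000):
    return sum(range(n))

elapsed = _busy_loop()  # a float (seconds), not the sum
print(f"_busy_loop took {elapsed:.4f}s")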
| 36 |
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5)) | 39 | 0 |
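# Cross-check the Pascal's-triangle DP above against math.comb (Python 3.8+),
# which computes the same binomial coefficient in closed form.
from math import comb

for n, r in [(10, 5), (20, 0), (7, 7)]:
    assert binomial_coefficient(n, r) == comb(n, r)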
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    def __init__( self , group : int = 14 ):
        '''simple docstring'''
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self ):
        '''simple docstring'''
        return hex(self.__private_key )[2:]
    def generate_public_key( self ):
        '''simple docstring'''
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self , key : int ):
        '''simple docstring'''
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self , other_key_str : str ):
        '''simple docstring'''
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key_str : int , prime : int ):
        '''simple docstring'''
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ):
        '''simple docstring'''
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
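    # Usage sketch for the DiffieHellman class above: both parties derive the
    # same shared key from each other's public keys (group 14 = 2048-bit MODP).
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared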
| 92 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule(t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
class Object(object):
    '''simple docstring'''
    pass
class DiffusionUncond(nn.Module):
    '''simple docstring'''
    def __init__( self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download(model_name ):
    url = MODELS_MAP[model_name]['''url''']
    os.system(F'''wget {url} ./''' )
    return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name ):
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(F'''ResConvBlock error with {name}''' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )
def rename(input_string , max_depth=13 ):
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num ) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict ):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict , new_k , v ):
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args ):
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    model_name = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNetaDModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['''state_dict'''] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, F'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith('''kernel''' ) for k in list(diffusers_minus_renamed ) ), F'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('''Diff sum''' , diff_sum )
    print('''Diff max''' , diff_max )
    assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
    print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase_ = parser.parse_args()
main(args) | 39 | 0 |
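    # The crash schedule above maps sigma = sin^2(pi*t/2), alpha = sqrt(1 - sigma^2),
    # t' = atan2(sigma, alpha) * 2 / pi. A scalar, math-only sketch of the same
    # computation (no tensors needed), showing the endpoints are preserved:
    def crash_t(t_scalar):
        sigma = math.sin(t_scalar * math.pi / 2) ** 2
        alpha = (1 - sigma**2) ** 0.5
        return math.atan2(sigma, alpha) / math.pi * 2

    for t_scalar in (0.0, 0.25, 0.5, 0.75, 1.0):
        print(f"t={t_scalar:.2f} -> crash_t={crash_t(t_scalar):.4f}")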
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb ):
    """simple docstring"""
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray ):
    """simple docstring"""
    return (gray > 127) & (gray <= 255)
def dilation(image , kernel ):
    """simple docstring"""
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
__UpperCamelCase = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
__UpperCamelCase = np.array(Image.open(lena_path))
# kernel to be applied
__UpperCamelCase = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
__UpperCamelCase = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__UpperCamelCase = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 39 | 0 |
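# The availability probe used above can be sketched with the standard library:
# importlib.util.find_spec returns None when a package is absent. Illustrative
# only; transformers' is_torch_available() additionally checks versions and
# environment flags.
import importlib.util

def _is_available(package_name):
    return importlib.util.find_spec(package_name) is not None

print("torch available:", _is_available("torch"))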
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = ["input_values", "attention_mask"]
    def __init__( self , feature_size = 1 , sampling_rate = 16_000 , padding_value = 0.0 , do_normalize = False , num_mel_bins = 80 , hop_length = 16 , win_length = 64 , win_function = "hann_window" , frame_signal_scale = 1.0 , fmin = 80 , fmax = 7_600 , mel_floor = 1E-10 , reduction_factor = 2 , return_attention_mask = True , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size , name=self.win_function , periodic=True )
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCamelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCamelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def _extract_mel_features( self , one_waveform , ):
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
    def __call__( self , audio = None , audio_target = None , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , sampling_rate = None , **kwargs , ):
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
        if audio is not None:
            inputs = self._process_audio(
                audio , False , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target , True , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
if inputs is None:
return inputs_target
else:
SCREAMING_SNAKE_CASE_ : Any =inputs_target['input_values']
SCREAMING_SNAKE_CASE_ : Optional[int] =inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : int =decoder_attention_mask
return inputs
    def _process_audio( self , speech , is_target = False , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , **kwargs , ):
SCREAMING_SNAKE_CASE_ : Optional[int] =isinstance(_UpperCamelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE_ : Any =is_batched_numpy or (
isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE_ : int =[np.asarray(_UpperCamelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE_ : Optional[int] =np.asarray(_UpperCamelCase , dtype=np.floataa )
elif isinstance(_UpperCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE_ : int =speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE_ : Dict =[speech]
# needed to make pad() work on spectrogram inputs
SCREAMING_SNAKE_CASE_ : Optional[int] =self.feature_size
# convert into correct format for padding
if is_target:
SCREAMING_SNAKE_CASE_ : Any =[self._extract_mel_features(_UpperCamelCase ) for waveform in speech]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =BatchFeature({'input_values': features} )
SCREAMING_SNAKE_CASE_ : Tuple =self.num_mel_bins
else:
SCREAMING_SNAKE_CASE_ : Any =BatchFeature({'input_values': speech} )
SCREAMING_SNAKE_CASE_ : int =self.pad(
_UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , truncation=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE_ : Any =feature_size_hack
# convert input values to correct format
SCREAMING_SNAKE_CASE_ : Any =padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
SCREAMING_SNAKE_CASE_ : Tuple =[np.asarray(_UpperCamelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_UpperCamelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
SCREAMING_SNAKE_CASE_ : List[Any] =[array.astype(np.floataa ) for array in input_values]
elif isinstance(_UpperCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE_ : str =input_values.astype(np.floataa )
# convert attention_mask to correct format
SCREAMING_SNAKE_CASE_ : List[Any] =padded_inputs.get('attention_mask' )
if attention_mask is not None:
SCREAMING_SNAKE_CASE_ : int =[np.asarray(_UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
SCREAMING_SNAKE_CASE_ : List[str] =(
attention_mask
if self._get_padding_strategies(_UpperCamelCase , max_length=_UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE_ : Tuple =self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCamelCase , padding_value=self.padding_value )
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ : Tuple =padded_inputs.convert_to_tensors(_UpperCamelCase )
return padded_inputs
    def to_dict( self ):
SCREAMING_SNAKE_CASE_ : List[Any] =super().to_dict()
# Don't serialize these as they are derived from the other properties.
SCREAMING_SNAKE_CASE_ : List[Any] =['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
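# Pure-NumPy sketch of the masked zero-mean/unit-variance normalization in
# zero_mean_unit_var_norm above: only the first `length` samples of each vector
# feed the statistics, and the padded tail is set to the padding value.
_rng = np.random.default_rng(0)
for _length in (10, 6):
    _vector = _rng.normal(5.0, 3.0, size=10).astype(np.float32)
    _normed = (_vector - _vector[:_length].mean()) / np.sqrt(_vector[:_length].var() + 1e-7)
    _normed[_length:] = 0.0  # padding_value
    print(round(float(_normed[:_length].mean()), 6), round(float(_normed[:_length].var()), 6))  # ~0.0, ~1.0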
| 220 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
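
# --- Illustrative sketch (added; not part of the original test) ---
# What top-k/top-p filtering does conceptually, in plain numpy: keep only the
# k highest logits, then additionally drop tokens outside the smallest set
# whose cumulative probability exceeds top_p. Single-row simplification of the
# batched TF implementation exercised above; `min_tokens_to_keep` is omitted.
import numpy as np


def top_k_top_p_filter_sketch(logits, top_k=10, top_p=0.6, filter_value=-float("inf")):
    logits = np.asarray(logits, dtype=np.float64).copy()
    # top-k: mask everything strictly below the k-th largest logit
    if top_k > 0:
        kth_largest = np.sort(logits)[-top_k]
        logits[logits < kth_largest] = filter_value
    # top-p (nucleus): mask the tail of the sorted cumulative distribution
    if top_p < 1.0:
        order = np.argsort(logits)[::-1]
        probs = np.exp(logits[order] - logits[order].max())
        probs /= probs.sum()
        cutoff = np.cumsum(probs) > top_p
        # shift right so the first token crossing top_p is still kept
        cutoff[1:] = cutoff[:-1].copy()
        cutoff[0] = False
        logits[order[cutoff]] = filter_value
    return logits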
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        max_new_tokens = 2
        input_length = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # Create a fake model with a different signature; in particular, it accepts "foo" as an argument.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar") | 39 | 0 |
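
# --- Illustrative sketch (added; not part of the original test) ---
# The behaviour the test above relies on: generate() forwards only the model
# kwargs the encoder's call signature accepts, and skips filtering entirely
# when the callable takes **kwargs. Hypothetical helper, not transformers'
# actual code.
import inspect


def filter_kwargs_for(fn, **model_kwargs):
    params = inspect.signature(fn).parameters
    if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return model_kwargs  # **kwargs present -> nothing can be filtered out
    return {k: v for k, v in model_kwargs.items() if k in params}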
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 437 |
import unittest

from transformers import DonutProcessor

MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json) | 39 | 0 |
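
# --- Illustrative sketch (added; not part of the original test) ---
# A toy version of the tag-to-JSON conversion exercised above, handling only
# flat <s_key>value</s_key> pairs. Donut's real token2json additionally
# recurses into nested tags and splits <sep/>-delimited lists into arrays.
import re


def flat_token2json(sequence: str) -> dict:
    return {m.group(1): m.group(2) for m in re.finditer(r"<s_(.+?)>(.*?)</s_\1>", sequence)}


# flat_token2json("<s_name>John Doe</s_name><s_age>99</s_age>")
# -> {"name": "John Doe", "age": "99"}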
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 624 |
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 39 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 404 |
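# --- Illustrative sketch (added; not part of the conversion script above) ---
# The core trick in read_in_q_k_v: timm/DINO checkpoints store one fused
# (3 * hidden_size, hidden_size) qkv projection, while the HF checkpoint wants
# separate query/key/value weights, so conversion is just row slicing. A
# minimal demonstration with a hypothetical hidden size:
import torch

hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused projection matrix
query_weight = qkv_weight[:hidden_size, :]
key_weight = qkv_weight[hidden_size : 2 * hidden_size, :]
value_weight = qkv_weight[-hidden_size:, :]
# concatenating the three slices recovers the original fused matrix
assert torch.equal(torch.cat([query_weight, key_weight, value_weight]), qkv_weight)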
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy()) | 39 | 0 |
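
# --- Illustrative sketch (added; not part of the original test) ---
# What a patch_environment-style context manager does: set environment
# variables for the duration of the block, then restore the previous state.
# This is a simplified assumption about accelerate's helper, which also
# uppercases the keyword names as shown here.
import contextlib
import os


@contextlib.contextmanager
def patch_environment_sketch(**kwargs):
    previous = {key.upper(): os.environ.get(key.upper()) for key in kwargs}
    os.environ.update({key.upper(): str(value) for key, value in kwargs.items()})
    try:
        yield
    finally:
        for key, value in previous.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value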
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]",
            "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".",
            "What", "'s", "up", "!", "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 431 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        ) | 39 | 0 |
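# --- Illustrative usage (added; all values are arbitrary examples) ---
# Instantiating the config defined above for a univariate series with a
# 24-step forecast horizon; every argument left out keeps its default.
config = InformerConfig(
    prediction_length=24,
    context_length=48,
    input_size=1,
    num_time_features=2,
    attention_type="prob",  # Informer's ProbSparse attention (also the default)
)
assert config.context_length == 48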
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original DETA weights into our DETA structure.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
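
    # Example invocation (script filename and dump path are hypothetical):
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
    #       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub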
| 51 |
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Note: the function name is assumed from context; the snippet computes the
    # complex apparent power of an AC source from magnitudes and phase angles in degrees.
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
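

# Quick sanity check for the function above (illustrative values, not part of
# the original snippet): with zero phase angles the product is purely real.
def _apparent_power_demo() -> None:
    assert apparent_power(100, 5, 0, 0) == 500 + 0j
    print(apparent_power(100, 5, 90, 90))  # approximately (-500+0j): the angles add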
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 39 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)
        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)
        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
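
    # To exercise this slow test locally one would typically run something like
    # (the exact path inside the repository is assumed):
    #   RUN_SLOW=1 pytest examples/research_projects/deebert/test_glue_deebert.py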
| 643 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=True , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , ) ->Optional[int]:
snake_case_ = BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , *_UpperCamelCase : List[Any] ) ->Union[str, Any]:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
snake_case_ = self.seq_length // 2
snake_case_ = 0
# first forward pass
snake_case_, snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case_ = ids_tensor((1,) , _UpperCamelCase ).item() + 1
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case_ = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , *_UpperCamelCase : List[Any] ) ->int:
snake_case_ = BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
snake_case_, snake_case_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
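
    # Note on the two cache checks above: generation with past_key_values must be
    # numerically equivalent to re-running the full sequence, so both tests compare
    # a random slice of the hidden states from the cached and uncached forward passes.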
def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=False ) ->Dict:
snake_case_ = BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , *_UpperCamelCase : Dict ) ->Dict:
snake_case_ = BioGptModel(_UpperCamelCase )
snake_case_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , *_UpperCamelCase : List[str] ) ->int:
snake_case_ = self.num_labels
snake_case_ = BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
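
    # Decoder-only models require left padding for batched generation; the test above
    # sets the tokenizer's padding side to "left" and reuses the EOS token as padding.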
@slow
def snake_case__( self : Optional[int] ) ->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self : str ) ->str:
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = '''multi_label_classification'''
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
 | 39 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00_085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
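
# The test above reflects Kandinsky's two-stage flow: a prior pipeline first maps the
# text prompt to CLIP image embeddings, and the img2img decoder pipeline then denoises
# the init image toward those embeddings; `strength` controls how far the init image
# is perturbed before denoising begins.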
| 699 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
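

# Quick check of the rule above (a hypothetical helper, not part of the original):
# for f(x) = x**2 on [0, 1] the exact integral is 1/3, and since f is convex the
# trapezoidal estimate with 10 steps should land slightly above that value.
def _trapezoid_demo() -> None:
    print(method_1([0.0, 1.0], 10.0))  # ~0.335 vs the exact 0.3333...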
def make_points(a, b, h):
    x = a + h
    while x <= (b - h):  # <= so the last interior point is not dropped
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
 | 39 | 0 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, the number of dice rolls achieving it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
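

# Sanity sketch for the helper above (hypothetical, not part of the original):
# with two six-sided dice there are exactly 6 of the 36 outcomes totalling 7.
def _frequency_demo() -> None:
    assert total_frequency_distribution(sides_number=6, dice_number=2)[7] == 6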
def solution() -> float:
    """Return the probability (rounded to 7 digits) that Peter's dice total beats Colin's."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
| 36 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / '''model_card_template.md'''
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += f'''; jax/{_jax_version}'''
        ua += f'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += f'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
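

# For illustration (the exact fields depend on the installed backends), the string
# built above looks roughly like:
#   "diffusers/0.x.y; python/3.10.12; session_id/<hex>; torch/2.0.1; pipeline/text-to-image"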
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['''name''']
        return f'''{username}/{model_id}'''
    else:
        return f'''{organization}/{model_id}'''
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )
    if hasattr(args, '''local_rank''') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, '''hub_token''') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args, '''dataset_name''') else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, '''gradient_accumulation_steps''') else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args, '''adam_beta1''') else None , adam_beta2=args.adam_beta2 if hasattr(args, '''adam_beta2''') else None , adam_weight_decay=args.adam_weight_decay if hasattr(args, '''adam_weight_decay''') else None , adam_epsilon=args.adam_epsilon if hasattr(args, '''adam_epsilon''') else None , lr_scheduler=args.lr_scheduler if hasattr(args, '''lr_scheduler''') else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args, '''lr_warmup_steps''') else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args, '''ema_inv_gamma''') else None , ema_power=args.ema_power if hasattr(args, '''ema_power''') else None , ema_max_decay=args.ema_max_decay if hasattr(args, '''ema_max_decay''') else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir, '''README.md''')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R'''snapshots/([^/]+)/''' , resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir=None, new_cache_dir=None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
    return weights_name
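

# Behaviour sketch for the helper above (illustrative):
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin")         -> unchanged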
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version ) >= version.parse('''0.20.0''' )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.''' , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
                F'''containing a file named {weights_name}''' )
 | 39 | 0 |
'''Export a Stable Diffusion checkpoint (text encoder, UNet, VAE, safety checker) to ONNX.'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def onnx_export(
    model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = '''cuda'''
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
    else:
        device = '''cpu'''
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=device , dtype=torch.int32 )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
            '''input_ids''': {0: '''batch''', 1: '''sequence'''},
        } , opset=opset , )
del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / '''unet''' / '''model.onnx'''
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , unet_in_channels , unet_sample_size , unet_sample_size ).to(device=device , dtype=dtype ),
            torch.randn(2 ).to(device=device , dtype=dtype ),
            torch.randn(2 , num_tokens , text_hidden_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=unet_path , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
            '''timestep''': {0: '''batch'''},
            '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
        } , opset=opset , use_external_data_format=True , )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet , unet_model_path , save_as_external_data=True , all_tensors_to_one_file=True , location='''weights.pb''' , convert_attribute=False , )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample , return_dict : vae_encoder.encode(sample , return_dict )[0].sample()
    onnx_export(
        vae_encoder , model_args=(
            torch.randn(1 , vae_in_channels , vae_sample_size , vae_sample_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=opset , )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , unet_sample_size , unet_sample_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=opset , )
del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , clip_num_channels , clip_image_size , clip_image_size , ).to(device=device , dtype=dtype ),
                torch.randn(1 , clip_image_size , clip_image_size , clip_num_channels ).to(device=device , dtype=dtype ),
            ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
                '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
                '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
            } , opset=opset , )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(output_path)
    print('''ONNX pipeline saved to''' , output_path)
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path , provider='''CPUExecutionProvider''' )
    print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
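
    # Example invocation (paths are hypothetical):
    #   python convert_stable_diffusion_checkpoint_to_onnx.py \
    #       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14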
| 92 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output | 39 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=10_00 , ) -> Dict:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ) -> Optional[Any]:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )

        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Any:
        """simple docstring"""
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()

        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Dict:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> List[Any]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Any:
        """simple docstring"""
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
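# Sketch of how the dict above is consumed: ModelTesterMixin-style tests call
# prepare_config_and_inputs_for_common(), build a model from `config`, and run
# a forward pass with `inputs_dict` (illustrative only):
#
#     config, inputs_dict = tester.prepare_config_and_inputs_for_common()
#     model = LayoutLMvaModel(config).to(torch_device).eval()
#     with torch.no_grad():
#         outputs = model(**inputs_dict)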
@require_torch
class _A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> int:
        """simple docstring"""
        return True
    def setUp( self ) -> Any:
        """simple docstring"""
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> List[str]:
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
    def test_config( self ) -> List[str]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model( self ) -> Optional[int]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ) -> Optional[int]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_sequence_classification( self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    def test_for_question_answering( self ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ) -> Optional[Any]:
        """simple docstring"""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> "Image.Image":
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class _A ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> str:
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head( self ) -> int:
        """simple docstring"""
        model = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values.to(torch_device )

        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )

        # verify the logits
        expected_shape = torch.Size((1, 1_99, 7_68) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
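# Note on the expected shape above: assuming the processor's default 224x224
# resize with 16x16 patches, the image contributes (224 // 16) ** 2 + 1 = 197
# positions (196 patches + CLS), and the two text tokens bring the total
# sequence length to 199.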
| 26 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = '''pytorch_model.bin'''
@dataclasses.dataclass
class STModelArguments:
    '''simple docstring'''

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class STDataArguments:
    '''simple docstring'''

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "The name of the task to train on."} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
    '''simple docstring'''

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no" , metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Maximum number of self-training iterations."} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir ):
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort('''probability''' , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )

    dataset = dataset.remove_columns(['''label''', '''probability'''] )
    dataset = dataset.rename_column('''prediction''' , '''label''' )
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )

    pseudo_labeled_data_file = os.path.join(next_data_dir , F'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
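# Worked example of the validation-performance filter above: with
# eval_result = 0.9 and 1,000 inference rows, the dataset is sorted by
# descending confidence and the top int(0.9 * 1000) = 900 rows are kept as
# pseudo-labeled training data.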
def selftrain(model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
snake_case_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )

    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
# Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files['''train'''] = args.train_file
    data_files['''infer'''] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files['''eval'''] = args.eval_file

    for key in data_files:
        extension = data_files[key].split('''.''' )[-1]
        assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
    data_dir_format = F'''{args.output_dir}/self-train_iter-{{}}'''.format
    data_dir = data_dir_format(0 )

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=True )
        os.makedirs(data_dir , exist_ok=True )
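    # Note: data_dir_format above is a bound str.format, e.g. data_dir_format(2)
    # evaluates to f"{args.output_dir}/self-train_iter-2".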
accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
# Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        current_data_dir = data_dir_format(iteration )
        assert os.path.exists(current_data_dir )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
        current_output_dir = os.path.join(current_data_dir , '''stage-1''' )
        arguments_dict = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
arguments_dict.update({key: value} )
        model_bin_file_path = os.path.join(current_output_dir , '''best-checkpoint''' , MODEL_BIN_FILE )
        if os.path.exists(model_bin_file_path ):
            logger.info(
                '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , model_bin_file_path , iteration , )
        else:
            logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , iteration )
            finetune(**arguments_dict )
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path )
            logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , iteration )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir , '''best-checkpoint''' )
            current_output_dir = os.path.join(current_data_dir , '''stage-2''' )
            # Update arguments_dict
            arguments_dict['''model_name_or_path'''] = model_path
            arguments_dict['''train_file'''] = data_files['''train''']
            arguments_dict['''output_dir'''] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir , '''best-checkpoint''' , MODEL_BIN_FILE )
            if os.path.exists(model_bin_file_path ):
                logger.info(
                    '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , model_bin_file_path , iteration , )
            else:
                logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , iteration )
                finetune(**arguments_dict )
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path )
                logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , iteration )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir , '''best-checkpoint''' ) )
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir , '''eval_results_best-checkpoint.json''' )
        test_results_file = os.path.join(current_output_dir , '''test_results_best-checkpoint.json''' )
        assert os.path.exists(eval_results_file )

        with open(eval_results_file , '''r''' ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
        infer_output_file = os.path.join(current_output_dir , '''infer_output_best-checkpoint.csv''' )
        assert os.path.exists(infer_output_file )
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
        infer_output = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']

        if accelerator.is_main_process:
            os.makedirs(next_data_dir , exist_ok=True )
            shutil.copy(eval_results_file , os.path.join(output_dir , F'''eval_results_iter-{iteration}.json''' ) )
            if os.path.exists(test_results_file ):
                shutil.copy(test_results_file , os.path.join(output_dir , F'''test_results_iter-{iteration}.json''' ) )
            create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir )
        accelerator.wait_for_everyone()

        data_files['''train_pseudo'''] = os.path.join(next_data_dir , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
    if best_iteration is not None:
        # Save the best iteration
        logger.info('''Best iteration: %d''' , best_iteration )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , best_eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , F'''eval_results_iter-{best_iteration}.json''' ) , os.path.join(output_dir , '''eval_results_best-iteration.json''' ) , )
    else:
        # Assume that the last iteration is the best
        logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(output_dir , '''eval_results_best-iteration.json''' ) , )
| 39 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ) -> Union[str, Any]:
    """simple docstring"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=1 , padding_value=0.0 , sampling_rate=16_000 , do_normalize=True , num_mel_bins=80 , hop_length=16 , win_length=64 , win_function="hann_window" , fmin=80 , fmax=7_600 , mel_floor=1E-10 , return_attention_mask=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
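# Sketch of the batch shapes produced above: lengths grow from min_seq_length
# to max_seq_length in steps of seq_length_diff, e.g. with the defaults
# (batch_size=7, 400..2000) the per-item lengths are 400, 666, 932, ..., 1996.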
@require_torch
class lowerCAmelCase_ ( __A , unittest.TestCase ):
'''simple docstring'''
_lowercase = SpeechTaFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def __lowerCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ : List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ : List[Any] =[floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ : Optional[int] =[np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE_ : int =feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ : List[str] =feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
# Test batched
        encoded_sequences_a = feat_extract(speech_inputs , return_tensors='np' ).input_values
        encoded_sequences_b = feat_extract(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ : Optional[Any] =[floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ : Optional[Any] =['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ : Optional[int] =[None, 1_600, None]
for max_length, padding in zip(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =feat_extract(_UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : List[str] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Tuple =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ : int =range(800 , 1_400 , 200 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =[floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE_ : List[Any] =['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ : Optional[int] =[None, 1_600, None]
for max_length, padding in zip(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : str =feat_extract(_UpperCamelCase , max_length=_UpperCamelCase , padding=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : int =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ : Tuple =[floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ : int =feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Dict =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ : Tuple =[floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ : Optional[Any] =feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Optional[int] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
SCREAMING_SNAKE_CASE_ : Tuple =[floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=2_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Optional[int] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def __lowerCamelCase ( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def __lowerCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ : Optional[int] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ : List[str] =[floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ : str =[np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE_ : Union[str, Any] =feature_extractor(audio_target=_UpperCamelCase , padding=_UpperCamelCase , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ : Dict =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
# Test batched
        encoded_sequences_a = feature_extractor(speech_inputs , return_tensors='np' ).input_values
        encoded_sequences_b = feature_extractor(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_a = feature_extractor(speech_inputs , return_tensors='np' ).input_values
        encoded_sequences_b = feature_extractor(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Any =self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ : List[str] =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ : Dict =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCamelCase ) == len(_UpperCamelCase ) for x, y in zip(_UpperCamelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE_ : Tuple =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] =BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE_ : List[Any] =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ : List[Any] =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ : List[str] =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ : Any =BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ : Optional[Any] =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Tuple =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ : int =feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ : Union[str, Any] =feat_extract.pad(_UpperCamelCase , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE_ : List[str] =feat_extract.pad(_UpperCamelCase , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] =self.feat_extract_dict
SCREAMING_SNAKE_CASE_ : Tuple =True
SCREAMING_SNAKE_CASE_ : Dict =self.feature_extraction_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : int =self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ : Dict =[len(_UpperCamelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ : Any =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ : Optional[int] =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ : List[str] =feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ : List[Any] =feat_extract.pad(_UpperCamelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCamelCase )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] =self.feat_extract_dict
SCREAMING_SNAKE_CASE_ : List[str] =True
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.feature_extraction_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : str =self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ : Any =[len(_UpperCamelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ : str =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ : int =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ : Any =min(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ : Tuple =feat_extract.pad(
_UpperCamelCase , padding='max_length' , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='np' )
self.assertIn('attention_mask' , _UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowerCamelCase ( self ):
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
SCREAMING_SNAKE_CASE_ : Optional[Any] =self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ : List[Any] =SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : List[str] =feature_extractor(_UpperCamelCase , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _UpperCamelCase , atol=1E-6 ) )
def __lowerCamelCase ( self ):
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
SCREAMING_SNAKE_CASE_ : Optional[int] =self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] =SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : Tuple =feature_extractor(audio_target=_UpperCamelCase , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCamelCase , atol=1E-4 ) )
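# Minimal usage sketch for the extractor exercised above (illustrative; uses
# the class as imported in this file):
#
#     extractor = SpeechTaFeatureExtractor()
#     # waveform inputs -> padded/normalized values
#     inputs = extractor(raw_speech, sampling_rate=16_000, return_tensors="pt")
#     # target spectrograms -> (batch, frames, num_mel_bins) features
#     targets = extractor(audio_target=raw_speech, sampling_rate=16_000, return_tensors="pt")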
| 220 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AltDiffusionPipeline
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__( self : Dict ) ->int:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
snake_case_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
snake_case_ = CLIPTextModel(_UpperCamelCase )
snake_case_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
snake_case_ = 7_7
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=0 ) ->Any:
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__( self : Dict ) ->List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__( self : List[str] ) ->Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__( self : Dict ) ->Any:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = '''A photo of an astronaut'''
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
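# Minimal end-to-end sketch of the pipeline under test, mirroring the slow
# tests below (illustrative; downloads the BAAI/AltDiffusion weights):
#
#     pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion")
#     image = pipe("A painting of a squirrel eating a burger",
#                  num_inference_steps=20).images[0]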
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion( self ):
        # make sure that the PNDM scheduler skips the PRK steps here
        alt_pipe = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_fast_ddim( self ):
        scheduler = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
        alt_pipe = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='''numpy''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 39 | 0 |
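# Hedged usage sketch (not part of the original test file): a minimal end-to-end
# call of the pipeline exercised above. Assumes the public "BAAI/AltDiffusion"
# checkpoint is downloadable and enough memory is available.
def _example_alt_diffusion_inference():  # hypothetical helper, illustration only
    import torch
    from diffusers import AltDiffusionPipeline

    pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
    pipe = pipe.to("cpu")
    generator = torch.manual_seed(0)
    output = pipe("A painting of a squirrel eating a burger", generator=generator, num_inference_steps=20, output_type="np")
    return output.images[0]  # numpy array of shape (512, 512, 3)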
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset , expected_features ):
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory , text_path , tmp_path ):
    '''simple docstring'''
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    'features' , [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ] , )
def test_dataset_from_text_features(features , text_path , tmp_path ):
    '''simple docstring'''
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path , features=features , cache_dir=cache_dir ).read()
    _check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_text_split(split , text_path , tmp_path ):
    '''simple docstring'''
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path , cache_dir=cache_dir , split=split ).read()
    _check_text_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_text_path_type(path_type , text_path , tmp_path ):
    '''simple docstring'''
    if issubclass(path_type , str ):
        path = text_path
    elif issubclass(path_type , list ):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
    _check_text_dataset(dataset , expected_features )
def _check_text_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    '''simple docstring'''
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory , text_path , tmp_path ):
    '''simple docstring'''
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    'features' , [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ] , )
def test_datasetdict_from_text_features(features , text_path , tmp_path ):
    '''simple docstring'''
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path} , features=features , cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_text_split(split , text_path , tmp_path ):
    '''simple docstring'''
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
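# Hedged usage sketch (not part of the original tests): the reader exercised
# above is the same machinery behind the public `load_dataset("text", ...)` API.
# The file name below is illustrative.
def _example_load_text_corpus():  # hypothetical helper, illustration only
    from datasets import load_dataset

    dataset = load_dataset('text', data_files={'train': 'my_corpus.txt'}, split='train')
    return dataset.column_names  # -> ['text'], one example per line of the input file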
| 437 |
from math import factorial
def combinations(n , k ):
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter non-negative integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
) | 39 | 0 |
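# Hedged sanity check (not in the original file): n-choose-k is symmetric,
# i.e. C(n, k) == C(n, n - k) for every valid pair.
def _example_combinations_symmetry():  # hypothetical helper, illustration only
    for n, k in [(52, 5), (40, 4), (10, 3)]:
        assert combinations(n, k) == combinations(n, n - k)
    return True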
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
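# Hedged usage sketch (not part of the original __init__): with the lazy module
# installed, a heavyweight import only happens on first attribute access.
def _example_lazy_sew_import():  # hypothetical helper, illustration only
    from transformers import SEWConfig  # resolved lazily through _LazyModule
    return SEWConfig()  # touching the symbol triggers the real import of configuration_sew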
| 624 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BasicExample:
    '''simple docstring'''
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    '''simple docstring'''
    foo: int = 42
    baz: str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class WithDefaultBoolExample:
    '''simple docstring'''
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum ):
    '''simple docstring'''
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum ):
    '''simple docstring'''
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    '''simple docstring'''
    foo: BasicEnum = "toto"
    def __post_init__( self ):
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample:
    '''simple docstring'''
    foo: MixedTypeEnum = "toto"
    def __post_init__( self ):
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    '''simple docstring'''
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={"help": "help message"} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample:
    '''simple docstring'''
    foo_int: List[int] = list_field(default=[] )
    bar_int: List[int] = list_field(default=[1, 2, 3] )
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample:
    '''simple docstring'''
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__( self ):
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample:
    '''simple docstring'''
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto" , metadata={"help": "help message"} )
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        '''simple docstring'''
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        '''simple docstring'''
        foo: int | None = None
        bar: float | None = field(default=None , metadata={"help": "help message"} )
        baz: str | None = None
        ces: list[str] | None = list_field(default=[] )
        des: list[int] | None = list_field(default=[] )
class HfArgumentParserTest(unittest.TestCase ):
    '''simple docstring'''
    def argparsersEqual( self , a : argparse.ArgumentParser , b : argparse.ArgumentParser ):
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , None ) and yy.get('''choices''' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice ) , yy['''type'''](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
    def test_basic( self ):
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument('''--bar''' , type=float , required=True )
        expected.add_argument('''--baz''' , type=str , required=True )
        expected.add_argument('''--flag''' , type=string_to_bool , default=False , const=True , nargs='''?''' )
        self.argparsersEqual(parser , expected )
        args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
    def test_with_default( self ):
        parser = HfArgumentParser(WithDefaultExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=4_2 , type=int )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        self.argparsersEqual(parser , expected )
    def test_with_default_bool( self ):
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=string_to_bool , default=False , const=True , nargs='''?''' )
        expected.add_argument('''--baz''' , type=string_to_bool , default=True , const=True , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=False , dest='''baz''' )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )
            args = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )
    def test_with_enum( self ):
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 4_2 )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def test_with_literal( self ):
        @dataclass
        class WithLiteralExample:
            '''simple docstring'''
            foo: Literal["titi", "toto", 42] = "toto"
        parser = HfArgumentParser(WithLiteralExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 4_2 )
    def test_with_list( self ):
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=int )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=int )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def test_with_optional( self ):
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=None , type=int )
        expected.add_argument('''--bar''' , default=None , type=float , help='''help message''' )
        expected.add_argument('''--baz''' , default=None , type=str )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=str )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=int )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(args , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def test_with_required( self ):
        parser = HfArgumentParser(RequiredExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=int , required=True )
        expected.add_argument('''--required_str''' , type=str , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        self.argparsersEqual(parser , expected )
    def test_with_string_literal_annotation( self ):
        parser = HfArgumentParser(StringLiteralAnnotationExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        self.argparsersEqual(parser , expected )
    def test_parse_dict( self ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 1_2,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , args )
    def test_parse_dict_extra_key( self ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 1_2,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 4_2,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
    def test_parse_json( self ):
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            '''foo''': 1_2,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_json''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(args_dict_for_json , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
        args = BasicExample(**args_dict_for_json )
        self.assertEqual(parsed_args , args )
    def test_parse_yaml( self ):
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            '''foo''': 1_2,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_yaml''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
        args = BasicExample(**args_dict_for_yaml )
        self.assertEqual(parsed_args , args )
    def test_integration_training_args( self ):
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser ) | 39 | 0 |
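# Hedged usage sketch (not part of the original tests): the typical
# HfArgumentParser flow that the suite above verifies piece by piece.
def _example_hf_argument_parser():  # hypothetical helper, illustration only
    @dataclass
    class ExampleArgs:  # hypothetical dataclass, illustration only
        learning_rate: float = field(default=5e-5, metadata={'help': 'initial learning rate'})
        do_train: bool = False

    parser = HfArgumentParser(ExampleArgs)
    (parsed,) = parser.parse_args_into_dataclasses(['--learning_rate', '1e-4', '--do_train', 'True'])
    assert parsed.learning_rate == 1e-4 and parsed.do_train is True
    return parsed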
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def longformer_dict_integration_testing( self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=True ) , [0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=True ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
    @slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ):
        tokenizer = self.get_tokenizer()
        sequence = '''Encode this sequence.'''
        space_encoding = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs( self ):
        pass
    def test_embeded_special_tokens( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
    def test_change_add_prefix_space_and_trim_offsets_args( self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
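# Hedged usage sketch (not part of the original tests): reading character
# offsets from the fast tokenizer, mirroring the assertions above. Downloads
# the "allenai/longformer-base-4096" checkpoint.
def _example_offset_mapping():  # hypothetical helper, illustration only
    tok = LongformerTokenizerFast.from_pretrained('allenai/longformer-base-4096')
    enc = tok('hello hello', return_offsets_mapping=True, add_special_tokens=False)
    # with the default add_prefix_space=False / trim_offsets=True, offsets index the raw string
    assert enc.offset_mapping[0] == (0, 5)
    assert enc.offset_mapping[1] == (6, 11)
    return enc.offset_mapping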
| 404 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) ->None:
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs ) | 39 | 0 |
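# Hedged usage sketch (not part of the original file): the shim still returns a
# working image processor but emits the FutureWarning defined above.
def _example_deprecation_warning():  # hypothetical helper, illustration only
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        feature_extractor = ChineseCLIPFeatureExtractor()
    assert any('deprecated' in str(w.message) for w in caught)
    return feature_extractor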
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class DetaConfig(PretrainedConfig ):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs, ):
        """simple docstring"""
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
        else:
            if isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.pop('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
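# Hedged usage sketch (not part of the original file) of the config class above.
def _example_deta_config():  # hypothetical helper, illustration only
    config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
    # attribute_map aliases: hidden_size -> d_model, num_attention_heads -> encoder_attention_heads
    assert config.hidden_size == config.d_model
    serialized = config.to_dict()  # the nested backbone config is serialized as a plain dict
    assert isinstance(serialized['backbone_config'], dict)
    return serialized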
| 431 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__( self , vocab_size=5_0_2_7_7 , context_length=1_0_2_4 , hidden_size=4_0_9_6 , num_hidden_layers=3_2 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) ->None:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs ) | 39 | 0 |
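# Hedged usage sketch (not part of the original file): derived sizes fall back
# to `hidden_size` when left unset.
def _example_rwkv_config():  # hypothetical helper, illustration only
    config = RwkvConfig(hidden_size=512)
    assert config.attention_hidden_size == 512  # defaults to hidden_size
    assert config.intermediate_size == 4 * 512  # defaults to 4 * hidden_size
    assert config.max_position_embeddings == config.context_length  # attribute_map alias
    return config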
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type ="gpt_neox_japanese"
    def __init__( self , vocab_size=32000 , hidden_size=2560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=10000 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=31996 , eos_token_id=31999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
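# Hedged usage sketch (not part of the original file): unlike most configs, the
# feed-forward width here is expressed as a multiple of the hidden size.
def _example_gpt_neox_japanese_config():  # hypothetical helper, illustration only
    config = GPTNeoXJapaneseConfig(hidden_size=1024, intermediate_multiple_size=4)
    assert config.hidden_size * config.intermediate_multiple_size == 4096  # effective FFN width
    return config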
| 51 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    '''simple docstring'''
    def __init__( self , cache_dir : Optional[str] = None ) ->None:
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path( self , path : str ) ->str:
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )
    def _do_extract( self , output_path : str , force_extract : bool ) ->bool:
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )
    def extract( self , input_path : str , force_extract : bool = False ) ->str:
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class BaseExtractor(ABC ):
    '''simple docstring'''
    @classmethod
    @abstractmethod
    def is_extractable( cls , path : Union[Path, str] , **kwargs ) ->bool:
        ...
    @staticmethod
    @abstractmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        ...
class MagicNumberBaseExtractor(BaseExtractor , ABC ):
    '''simple docstring'''
    magic_numbers : List[bytes] = []
    @staticmethod
    def read_magic_number( path : Union[Path, str] , magic_number_length : int ):
        with open(path , '''rb''' ) as f:
            return f.read(magic_number_length )
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) ->bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor ):
    '''simple docstring'''
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , **kwargs ) ->bool:
        return tarfile.is_tarfile(path )
    @staticmethod
    def safemembers( members , trusted_dir ):
        def resolved(path : str ) -> str:
            return os.path.realpath(os.path.abspath(path ) )
        def badpath(path : str , base : str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )
        def badlink(info , base : str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )
        base = resolved(trusted_dir )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
            else:
                yield finfo
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [b"\x1F\x8B"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with gzip.open(input_path , '''rb''' ) as gzip_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) ->bool:
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path , '''rb''' ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , '''r''' ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError('''Please pip install rarfile''' )
        import rarfile
        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [b"\x28\xb5\x2F\xFD"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('''Please pip install zstandard''' )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path , '''rb''' ) as ifh, open(output_path , '''wb''' ) as ofh:
            dctx.copy_stream(ifh , ofh )
class BzipaExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [b"\x42\x5A\x68"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with bz2.open(input_path , '''rb''' ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('''Please pip install py7zr''' )
        import py7zr
        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , '''r''' ) as archive:
            archive.extractall(output_path )
class LzaExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [b"\x04\x22\x4D\x18"]
    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.LZ4_AVAILABLE:
            raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(input_path , '''rb''' ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    '''simple docstring'''
    extractors : Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length( cls ):
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number( path : Union[Path, str] , magic_number_length : int ):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , return_extractor : bool = False ) ->bool:
        warnings.warn(
            '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
            '''Use \'infer_extractor_format\' instead.''' , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format( cls , path : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format
    @classmethod
    def extract( cls , input_path : Union[Path, str] , output_path : Union[Path, str] , extractor_format : Optional[str] = None , extractor : Optional[BaseExtractor] = "deprecated" , ) ->None:
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix('''.lock''' ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
                        '''Use \'extractor_format\' instead.''' , category=FutureWarning , )
                    extractor = extractor if extractor != '''deprecated''' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
                    '''exception in 3.0.0.''' , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path ) | 39 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __A ):
def __init__( self , *A_ , **A_ ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 643 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
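    # Bead sort ("gravity sort"): on each pass, surplus beads on an upper rod fall to
    # the rod below, so after len(sequence) passes the values are in ascending order.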
if any(not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(SCREAMING_SNAKE_CASE__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 39 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase = {
"gpt2": 1_024,
"gpt2-medium": 1_024,
"gpt2-large": 1_024,
"gpt2-xl": 1_024,
"distilgpt2": 1_024,
}
class _UpperCAmelCase ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Union[str, Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizer
def __init__( self : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int="<|endoftext|>" , UpperCamelCase__ : List[Any]="<|endoftext|>" , UpperCamelCase__ : Any="<|endoftext|>" , UpperCamelCase__ : List[str]=False , **UpperCamelCase__ : Tuple , ):
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
A = kwargs.pop('add_bos_token' , _UpperCamelCase )
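        # Keep the backend pre-tokenizer in sync: if its serialized add_prefix_space
        # flag differs from the requested one, rebuild it with the new setting.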
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCamelCase ) != add_prefix_space:
A = getattr(_UpperCamelCase , pre_tok_state.pop('type' ) )
A = add_prefix_space
A = pre_tok_class(**_UpperCamelCase )
A = add_prefix_space
def UpperCamelCase ( self : Tuple , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str] ):
A = kwargs.get('is_split_into_words' , _UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase ( self : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
A = kwargs.get('is_split_into_words' , _UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
A = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def UpperCamelCase ( self : str , UpperCamelCase__ : "Conversation" ):
A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
A = input_ids[-self.model_max_length :]
return input_ids
| 699 |
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    snake_case_ = re.sub('''<n>''' , '''''' , SCREAMING_SNAKE_CASE__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case_ ) )
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _A ( __A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = CpmAntTokenizer
__lowerCamelCase : Optional[Any] = False
def snake_case_ ( self ):
'''simple docstring'''
super().setUp()
snake_case : Union[str, Any] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
snake_case : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
snake_case : List[str] = """今天天气真好!"""
snake_case : Dict = ["""今天""", """天气""", """真""", """好""", """!"""]
snake_case : Tuple = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
snake_case : int = """今天天气真好!"""
snake_case : List[str] = [tokenizer.bos_token] + tokens
snake_case : Dict = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) ,_UpperCamelCase )
snake_case : Optional[int] = tokenizer.decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
| 36 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
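    # Pascal's-rule DP over a single row: after pass i, c[j] holds C(i, j). Iterating j
    # downwards ensures c[j - 1] still holds the previous row's value when c[j] is updated.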
snake_case_ = [0 for i in range(r + 1 )]
# nc0 = 1
snake_case_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
snake_case_ = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5)) | 39 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
UpperCamelCase_ = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
UpperCamelCase_ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class __SCREAMING_SNAKE_CASE ( __A ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = ["input_ids", "attention_mask"]
lowerCamelCase_ = []
lowerCamelCase_ = []
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : Optional[int]="<unk>" , UpperCAmelCase__ : Optional[Any]="<pad>" , UpperCAmelCase__ : Tuple="<mask>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
lowercase : int =AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
lowercase : Any ={} if sp_model_kwargs is None else sp_model_kwargs
lowercase : int =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_UpperCamelCase , tgt_lang=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
lowercase : Optional[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
lowercase : Tuple =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase : Tuple ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Optional[Any] =1
lowercase : Optional[Any] =len(self.sp_model )
lowercase : str ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCamelCase )
}
lowercase : str ={v: k for k, v in self.lang_code_to_id.items()}
lowercase : Optional[Any] =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase : List[str] =src_lang if src_lang is not None else '''en_XX'''
lowercase : Any =self.lang_code_to_id[self._src_lang]
lowercase : Any =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Optional[Any] =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ):
'''simple docstring'''
lowercase : int =self.__dict__.copy()
lowercase : List[str] =None
return state
def __setstate__( self : List[Any] , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Any =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : str ={}
lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[str] ={self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : int =self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =[]
lowercase : int =''''''
lowercase : Union[str, Any] =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
lowercase : Optional[int] =True
lowercase : List[Any] =[]
else:
current_sub_tokens.append(_UpperCamelCase )
lowercase : List[Any] =False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : int =os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
lowercase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
lowercase : Optional[int] =[1] * len(self.prefix_tokens )
lowercase : Any =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCamelCase )) + ([0] * len(_UpperCamelCase )) + suffix_ones
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] , **UpperCAmelCase__ : Dict ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase : str =src_lang
lowercase : List[Any] =self(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
lowercase : str =self.convert_tokens_to_ids(_UpperCamelCase )
lowercase : Optional[Any] =tgt_lang_id
return inputs
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str = "en_XX" , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : str = "ro_RO" , **UpperCAmelCase__ : int , ):
'''simple docstring'''
lowercase : Optional[Any] =src_lang
lowercase : Tuple =tgt_lang
return super().prepare_seqaseq_batch(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Tuple =self.lang_code_to_id[src_lang]
lowercase : str =[self.cur_lang_code_id]
lowercase : Optional[Any] =[self.eos_token_id]
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Any =self.lang_code_to_id[tgt_lang]
lowercase : Any =[self.cur_lang_code_id]
lowercase : Tuple =[self.eos_token_id]
| 92 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return torch.atana(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / math.pi * 2
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
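    # "Crash" noise schedule: sigma = sin(t * pi / 2) ** 2 and alpha = sqrt(1 - sigma ** 2),
    # so (alpha, sigma) stays on the unit circle before being mapped back to a timestep.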
snake_case_ = torch.sin(t * math.pi / 2 ) ** 2
snake_case_ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class snake_case_ ( __A ):
'''simple docstring'''
pass
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int ) ->Optional[int]:
super().__init__()
snake_case_ = DiffusionAttnUnetaD(_UpperCamelCase , n_attn_layers=4 )
snake_case_ = deepcopy(self.diffusion )
snake_case_ = torch.quasirandom.SobolEngine(1 , scramble=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
for key, value in ATTN_MAP.items():
if name.startswith(SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif name.startswith(SCREAMING_SNAKE_CASE__ ):
return [name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 ):
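    # Map a layer path from the original audio-diffusion checkpoint to its diffusers
    # equivalent: count the nested "net.3." / "main.7." wrappers to recover the U-Net
    # depth, then dispatch the layer index through the lookup tables above.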
snake_case_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
snake_case_ = 0
if string.startswith('''net.3.''' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('''net.''' ):
snake_case_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('''main.''' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = '''mid_block'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - 1}''' if int(SCREAMING_SNAKE_CASE__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(SCREAMING_SNAKE_CASE__ )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(SCREAMING_SNAKE_CASE__ )
snake_case_ = new_string_left
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
snake_case_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
            # up- and downsample layers don't have trainable weights
continue
snake_case_ = rename(SCREAMING_SNAKE_CASE__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = transform_conv_attns(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = v
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
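    # Attention weights stored as 1x1 convolutions become linear weights by dropping
    # the trailing kernel axis; a fused qkv matrix is split into three equal chunks.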
if len(SCREAMING_SNAKE_CASE__ ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
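    # End-to-end conversion: download the checkpoint if needed, rename its weights into
    # the diffusers layout, then sanity-check the new pipeline against the original
    # sampler on identical noise before optionally saving.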
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(SCREAMING_SNAKE_CASE__ )
snake_case_ = MODELS_MAP[model_name]['''sample_rate''']
snake_case_ = MODELS_MAP[model_name]['''sample_size''']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
snake_case_ = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )['''state_dict'''] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(SCREAMING_SNAKE_CASE__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
snake_case_ = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
snake_case_ = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
snake_case_ = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , SCREAMING_SNAKE_CASE__ )
print('''Diff max''' , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase_ = parser.parse_args()
main(args) | 39 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class _A ( __A ):
def __init__( self : int ) -> Optional[Any]:
"""simple docstring"""
self.test()
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = 0
__snake_case : List[str] = False
while not completed:
if counter == 1:
self.reset()
__snake_case : Any = self.advance()
if not self.does_advance(_UpperCamelCase ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
__snake_case , __snake_case , __snake_case : Any = self.update(_UpperCamelCase )
counter += 1
if counter > 1_00_00:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase__ ( self : Union[str, Any] , __magic_name__ : int ) -> int:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase__ ( self : List[str] , __magic_name__ : List[Any]=False ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _A ( __A ):
def __init__( self : Optional[Any] , __magic_name__ : List[int] ) -> Dict:
"""simple docstring"""
super(_UpperCamelCase , self ).__init__()
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or len(_UpperCamelCase ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(_UpperCamelCase , _UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
__snake_case : List[str] = token_ids
__snake_case : Any = len(self.token_ids )
__snake_case : List[Any] = -1 # the index of the currently fulfilled step
__snake_case : Optional[int] = False
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowercase__ ( self : Union[str, Any] , __magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowercase__ ( self : Union[str, Any] , __magic_name__ : int ) -> int:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
__snake_case : List[str] = False
__snake_case : Union[str, Any] = False
__snake_case : List[str] = False
if self.does_advance(_UpperCamelCase ):
self.fulfilled_idx += 1
__snake_case : List[str] = True
if self.fulfilled_idx == (self.seqlen - 1):
__snake_case : List[str] = True
__snake_case : Any = completed
else:
# failed to make progress.
__snake_case : int = True
self.reset()
return stepped, completed, reset
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = False
__snake_case : List[Any] = 0
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def lowercase__ ( self : str , __magic_name__ : Union[str, Any]=False ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = PhrasalConstraint(self.token_ids )
if stateful:
__snake_case : Any = self.seqlen
__snake_case : int = self.fulfilled_idx
__snake_case : str = self.completed
return new_constraint
class _A :
def __init__( self : List[str] , __magic_name__ : List[List[int]] , __magic_name__ : List[Any]=True ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = max([len(_UpperCamelCase ) for one in nested_token_ids] )
__snake_case : Optional[Any] = {}
for token_ids in nested_token_ids:
__snake_case : Union[str, Any] = root
for tidx, token_id in enumerate(_UpperCamelCase ):
if token_id not in level:
__snake_case : int = {}
__snake_case : List[str] = level[token_id]
if no_subsets and self.has_subsets(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
"""Each list in `nested_token_ids` can\'t be a complete subset of another list, but is"""
f''' {nested_token_ids}.''' )
__snake_case : int = root
def lowercase__ ( self : Any , __magic_name__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = self.trie
for current_token in current_seq:
__snake_case : Any = start[current_token]
__snake_case : Dict = list(start.keys() )
return next_tokens
def lowercase__ ( self : Optional[int] , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = self.next_tokens(_UpperCamelCase )
return len(_UpperCamelCase ) == 0
def lowercase__ ( self : List[Any] , __magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = list(root.values() )
if len(_UpperCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_UpperCamelCase ) for nn in next_nodes] )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Any = self.count_leaves(_UpperCamelCase )
return len(_UpperCamelCase ) != leaf_count
class _A ( __A ):
def __init__( self : Optional[int] , __magic_name__ : List[List[int]] ) -> Any:
"""simple docstring"""
super(_UpperCamelCase , self ).__init__()
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or len(_UpperCamelCase ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(_UpperCamelCase , _UpperCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(_UpperCamelCase , _UpperCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
__snake_case : Optional[Any] = DisjunctiveTrie(_UpperCamelCase )
__snake_case : Tuple = nested_token_ids
__snake_case : List[str] = self.trie.max_height
__snake_case : str = []
__snake_case : List[str] = False
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = self.trie.next_tokens(self.current_seq )
if len(_UpperCamelCase ) == 0:
return None
else:
return token_list
def lowercase__ ( self : Dict , __magic_name__ : int ) -> Dict:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
__snake_case : Union[str, Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowercase__ ( self : Tuple , __magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
__snake_case : Union[str, Any] = False
__snake_case : Tuple = False
__snake_case : Any = False
if self.does_advance(_UpperCamelCase ):
self.current_seq.append(_UpperCamelCase )
__snake_case : List[str] = True
else:
__snake_case : Union[str, Any] = True
self.reset()
__snake_case : List[str] = self.trie.reached_leaf(self.current_seq )
__snake_case : Dict = completed
return stepped, completed, reset
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = False
__snake_case : Dict = []
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = DisjunctiveConstraint(self.token_ids )
if stateful:
__snake_case : Tuple = self.seqlen
__snake_case : List[str] = self.current_seq
__snake_case : int = self.completed
return new_constraint
class _A :
def __init__( self : Tuple , __magic_name__ : List[Constraint] ) -> str:
"""simple docstring"""
__snake_case : int = constraints
# max # of steps required to fulfill a given constraint
__snake_case : Union[str, Any] = max([c.seqlen for c in constraints] )
__snake_case : Dict = len(_UpperCamelCase )
__snake_case : Union[str, Any] = False
self.init_state()
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = []
__snake_case : Optional[Any] = None
__snake_case : Dict = [constraint.copy(stateful=_UpperCamelCase ) for constraint in self.constraints]
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__snake_case : Optional[int] = constraint.advance()
if isinstance(_UpperCamelCase , _UpperCamelCase ):
token_list.append(_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
token_list.extend(_UpperCamelCase )
else:
__snake_case : Any = self.inprogress_constraint.advance()
if isinstance(_UpperCamelCase , _UpperCamelCase ):
token_list.append(_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
token_list.extend(_UpperCamelCase )
if len(_UpperCamelCase ) == 0:
return None
else:
return token_list
def lowercase__ ( self : Dict , __magic_name__ : Optional[List[int]] ) -> List[Any]:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__snake_case , __snake_case : Tuple = self.add(_UpperCamelCase )
                # the entire list of constraints is fulfilled
if self.completed:
break
def lowercase__ ( self : Optional[int] , __magic_name__ : int ) -> List[Any]:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
__snake_case , __snake_case : Optional[int] = False, False
if self.completed:
__snake_case : List[str] = True
__snake_case : Tuple = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
__snake_case , __snake_case , __snake_case : Dict = self.inprogress_constraint.update(_UpperCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCamelCase ) )
__snake_case : Any = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__snake_case : Optional[Any] = None
if len(self.pending_constraints ) == 0:
# we're done!
__snake_case : Optional[int] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_UpperCamelCase ):
__snake_case , __snake_case , __snake_case : Optional[Any] = pending_constraint.update(_UpperCamelCase )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(_UpperCamelCase )
__snake_case : Any = None
if not complete and stepped:
__snake_case : Union[str, Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__snake_case : int = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__snake_case : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowercase__ ( self : int , __magic_name__ : List[str]=True ) -> Optional[Any]:
"""simple docstring"""
        __snake_case : str = ConstraintListState(self.constraints )  # we never actually touch the self.constraints
        # objects throughout this process, so the copy starts at its initialization state.
if stateful:
__snake_case : Tuple = [
constraint.copy(stateful=_UpperCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__snake_case : Union[str, Any] = self.inprogress_constraint.copy(stateful=_UpperCamelCase )
__snake_case : List[str] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 39 | 0 |
from ..utils import DummyObject, requires_backends
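# Placeholder classes: every constructor and loader calls requires_backends, which
# raises an informative ImportError when flax or transformers is not installed.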
class lowerCAmelCase_ ( metaclass=__A ):
'''simple docstring'''
_lowercase = ["flax", "transformers"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase_ ( metaclass=__A ):
'''simple docstring'''
_lowercase = ["flax", "transformers"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase_ ( metaclass=__A ):
'''simple docstring'''
_lowercase = ["flax", "transformers"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase_ ( metaclass=__A ):
'''simple docstring'''
_lowercase = ["flax", "transformers"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
| 220 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
snake_case_ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(_UpperCamelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('''inf''' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(_UpperCamelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-12 )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase , __A ):
'''simple docstring'''
if is_tf_available():
SCREAMING_SNAKE_CASE : Optional[int] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def snake_case__( self : List[Any] ) ->Optional[int]:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 2
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->List[Any]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) ->List[Any]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_0_2, 1_0_3]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for batch_size in range(1 , len(_UpperCamelCase ) + 1 ):
snake_case_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
def snake_case__( self : List[str] ) ->int:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 1
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Any ) ->List[str]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) ->Optional[int]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2], [1_0_2, 1_0_3]]
snake_case_ = [[1], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for input_row in range(len(_UpperCamelCase ) ):
snake_case_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
@require_tensorflow_text
def snake_case__( self : Optional[Any] ) ->List[Any]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_UpperCamelCase )
class snake_case_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ) ->List[Any]:
super().__init__()
snake_case_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_UpperCamelCase , '''spiece.model''' ) , '''rb''' ).read() )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : str ) ->List[Any]:
snake_case_ = self.tokenizer.tokenize(_UpperCamelCase )
snake_case_, snake_case_ = text.pad_model_inputs(
_UpperCamelCase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
snake_case_ = self.model.generate(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
return self.tokenizer.detokenize(_UpperCamelCase )
snake_case_ = CompleteSentenceTransformer()
snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
snake_case_ = complete_model(_UpperCamelCase )
snake_case_ = tf.keras.Model(_UpperCamelCase , _UpperCamelCase )
keras_model.save(_UpperCamelCase )
def snake_case__( self : Any ) ->List[Any]:
# Has PT equivalent: this test relies on random sampling
snake_case_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
snake_case_ = 1_4
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = '''Hello, my dog is cute and'''
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''tf''' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def snake_case__( self : str ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = '''Hugging Face is a technology company based in New York and Paris.'''
snake_case_ = bart_tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
snake_case_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
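        # a model whose call() accepts extra kwargs should receive them through generate()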
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Tuple=None , **_UpperCamelCase : Optional[int] ) ->List[str]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_UpperCamelCase , _UpperCamelCase ) )
class snake_case_ ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , **_UpperCamelCase : Tuple ) ->Optional[Any]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
with self.assertRaises(_UpperCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_UpperCamelCase , foo='''bar''' ) | 39 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
a_ = logging.get_logger(__name__)
a_ = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class A_(__A ):
"""simple docstring"""
a_ : Union[str, Any] = "dpt"
def __init__( self , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.0 , A=0.0 , A=0.0_2 , A=1E-12 , A=384 , A=16 , A=3 , A=False , A=True , A=[2, 5, 8, 11] , A="project" , A=[4, 2, 1, 0.5] , A=[96, 192, 384, 768] , A=256 , A=-1 , A=False , A=True , A=0.4 , A=255 , A=0.1 , A=[1, 1024, 24, 24] , A=[0, 1] , A=None , **A , ):
super().__init__(**_UpperCamelCase )
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : int = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
_lowerCamelCase : int = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
_lowerCamelCase : List[str] = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('Initializing the config with a `BiT` backbone.' )
_lowerCamelCase : Any = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
_lowerCamelCase : List[str] = backbone_config
else:
raise ValueError(
F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
_lowerCamelCase : Tuple = backbone_featmap_shape
_lowerCamelCase : int = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
_lowerCamelCase : Dict = None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : List[Any] = image_size
_lowerCamelCase : List[Any] = patch_size
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : List[str] = qkv_bias
_lowerCamelCase : Optional[int] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
_lowerCamelCase : Optional[Any] = readout_type
_lowerCamelCase : Union[str, Any] = reassemble_factors
_lowerCamelCase : Tuple = neck_hidden_sizes
_lowerCamelCase : Optional[Any] = fusion_hidden_size
_lowerCamelCase : Union[str, Any] = head_in_index
_lowerCamelCase : Any = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_lowerCamelCase : Dict = use_auxiliary_head
_lowerCamelCase : Tuple = auxiliary_loss_weight
_lowerCamelCase : str = semantic_loss_ignore_index
_lowerCamelCase : Dict = semantic_classifier_dropout
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_lowerCamelCase : Optional[Any] = self.backbone_config.to_dict()
_lowerCamelCase : Optional[int] = self.__class__.model_type
return output
| 437 |
import unittest
from transformers import DonutProcessor
lowerCAmelCase_ = '''naver-clova-ix/donut-base'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = DonutProcessor.from_pretrained(_UpperCamelCase )
def snake_case__( self : Dict ) ->str:
snake_case_ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
snake_case_ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
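        # parse the XML-like tag sequence back into the nested dict defined above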
snake_case_ = self.processor.tokenajson(_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 624 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
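    """
    Return the arithmetic mean of a non-empty list of numbers.

    >>> __SCREAMING_SNAKE_CASE([3.0, 6.0, 9.0])
    6.0
    """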
    if not SCREAMING_SNAKE_CASE__:
raise ValueError('''List is empty''' )
return sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 0 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int]=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : int=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : int=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : str=32 , UpperCamelCase__ : str=5 , UpperCamelCase__ : str=4 , UpperCamelCase__ : int=37 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : str=512 , UpperCamelCase__ : Optional[int]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : str=None , ) -> Dict:
_UpperCamelCase =parent
_UpperCamelCase =batch_size
_UpperCamelCase =seq_length
_UpperCamelCase =is_training
_UpperCamelCase =use_input_mask
_UpperCamelCase =use_token_type_ids
_UpperCamelCase =use_labels
_UpperCamelCase =vocab_size
_UpperCamelCase =hidden_size
_UpperCamelCase =num_hidden_layers
_UpperCamelCase =num_attention_heads
_UpperCamelCase =intermediate_size
_UpperCamelCase =hidden_act
_UpperCamelCase =hidden_dropout_prob
_UpperCamelCase =attention_probs_dropout_prob
_UpperCamelCase =max_position_embeddings
_UpperCamelCase =type_vocab_size
_UpperCamelCase =type_sequence_label_size
_UpperCamelCase =initializer_range
_UpperCamelCase =num_labels
_UpperCamelCase =num_choices
_UpperCamelCase =scope
def UpperCamelCase__ ( self : str ) -> List[Any]:
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase =None
if self.use_input_mask:
_UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase =None
if self.use_token_type_ids:
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase =None
_UpperCamelCase =None
_UpperCamelCase =None
if self.use_labels:
_UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self : List[str] ) -> Tuple:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ) -> Dict:
_UpperCamelCase =BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase )
_UpperCamelCase =model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , ) -> Optional[int]:
_UpperCamelCase =BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , *UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
_UpperCamelCase =BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
_UpperCamelCase =torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
_UpperCamelCase =self.seq_length // 2
_UpperCamelCase =0
# first forward pass
_UpperCamelCase , _UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase =ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_UpperCamelCase =ids_tensor((1,) , _UpperCamelCase ).item() + 1
_UpperCamelCase =ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_UpperCamelCase =random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCamelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCamelCase =torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
_UpperCamelCase =model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
_UpperCamelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCamelCase =output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCamelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
def UpperCamelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , *UpperCamelCase__ : List[Any] ) -> int:
_UpperCamelCase =BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
_UpperCamelCase =torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
_UpperCamelCase , _UpperCamelCase =outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_UpperCamelCase =ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase =ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_UpperCamelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCamelCase =torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
_UpperCamelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCamelCase =output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCamelCase =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
def UpperCamelCase__ ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , *UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str]=False ) -> Dict:
_UpperCamelCase =BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCamelCase =model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
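        # run a backward pass so the (optionally checkpointed) activations are recomputed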
result.loss.backward()
def UpperCamelCase__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , *UpperCamelCase__ : Dict ) -> Dict:
_UpperCamelCase =BioGptModel(_UpperCamelCase )
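        # GPT-2 style init check: residual projection ("c_proj") weights are expected
        # to be scaled by 1/sqrt(2 * num_hidden_layers)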
_UpperCamelCase =model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def UpperCamelCase__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , *UpperCamelCase__ : List[str] ) -> int:
_UpperCamelCase =self.num_labels
_UpperCamelCase =BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self : Optional[Any] ) -> int:
_UpperCamelCase =self.prepare_config_and_inputs()
        (_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) =config_and_inputs
_UpperCamelCase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __A , __A , __A , unittest.TestCase):
"""simple docstring"""
lowerCAmelCase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCAmelCase_ = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
def UpperCamelCase__ ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase =BioGptModelTester(self )
_UpperCamelCase =ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase__ ( self : str ) -> int:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : str ) -> Tuple:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase__ ( self : Tuple ) -> List[Any]:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase =type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase__ ( self : Tuple ) -> str:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def UpperCamelCase__ ( self : Union[str, Any] ) -> Dict:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def UpperCamelCase__ ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def UpperCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def UpperCamelCase__ ( self : Optional[int] ) -> Optional[int]:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def UpperCamelCase__ ( self : int ) -> Optional[Any]:
_UpperCamelCase =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
_UpperCamelCase =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
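        # decoder-only models need left padding for batched generation, otherwise new
        # tokens would be appended after the pad positions of the shorter sequence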
_UpperCamelCase ='''left'''
# Define PAD Token = EOS Token = 50256
_UpperCamelCase =tokenizer.eos_token
_UpperCamelCase =model.config.eos_token_id
# use different length sentences to test batching
_UpperCamelCase =[
'''Hello, my dog is a little''',
'''Today, I''',
]
_UpperCamelCase =tokenizer(_UpperCamelCase , return_tensors='''pt''' , padding=_UpperCamelCase )
_UpperCamelCase =inputs['''input_ids'''].to(_UpperCamelCase )
_UpperCamelCase =model.generate(
input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(_UpperCamelCase ) , )
_UpperCamelCase =tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
_UpperCamelCase =model.generate(input_ids=_UpperCamelCase )
_UpperCamelCase =inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
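        # reduce max_length by the pad count so the unpadded run of the second sentence
        # generates the same number of new tokens as the batched, padded run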
_UpperCamelCase =tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
_UpperCamelCase =model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
_UpperCamelCase =tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
_UpperCamelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
_UpperCamelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
_UpperCamelCase =[
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def UpperCamelCase__ ( self : Optional[int] ) -> List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase =BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase__ ( self : Optional[int] ) -> str:
_UpperCamelCase , _UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase =3
_UpperCamelCase =input_dict['''input_ids''']
_UpperCamelCase =input_ids.ne(1 ).to(_UpperCamelCase )
_UpperCamelCase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCamelCase =BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self : str ) -> str:
_UpperCamelCase , _UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase =3
_UpperCamelCase ='''multi_label_classification'''
_UpperCamelCase =input_dict['''input_ids''']
_UpperCamelCase =input_ids.ne(1 ).to(_UpperCamelCase )
_UpperCamelCase =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCamelCase =BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCamelCase =model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCAmelCase ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self : int ) -> Any:
_UpperCamelCase =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
_UpperCamelCase =torch.tensor([[2, 4805, 9, 656, 21]] )
_UpperCamelCase =model(_UpperCamelCase )[0]
_UpperCamelCase =4_2384
_UpperCamelCase =torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
_UpperCamelCase =torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self : List[str] ) -> Optional[int]:
_UpperCamelCase =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_UpperCamelCase =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
_UpperCamelCase =tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCamelCase =model.generate(
**_UpperCamelCase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=_UpperCamelCase , )
_UpperCamelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
_UpperCamelCase =(
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
| 404 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : List[str] ) ->str:
snake_case_ = inspect.getfile(accelerate.test_utils )
snake_case_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
snake_case_ = test_metrics
@require_cpu
def snake_case__( self : str ) ->int:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case__( self : Union[str, Any] ) ->Any:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case__( self : List[Any] ) ->Tuple:
self.test_metrics.main()
@require_multi_gpu
def snake_case__( self : Any ) ->Union[str, Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) | 39 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_A = sys.version_info >= (3, 10)
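# dataclasses reject mutable defaults, so list-valued fields are built via a default_factory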
def __UpperCamelCase ( _A=None , _A=None ):
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class A :
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
@dataclass
class A :
__snake_case = 42
__snake_case = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
__snake_case = False
__snake_case = True
__snake_case = None
class A ( __A ):
__snake_case = "titi"
__snake_case = "toto"
class A ( __A ):
__snake_case = "titi"
__snake_case = "toto"
__snake_case = 42
@dataclass
class A :
__snake_case = "toto"
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicEnum(self.foo )
@dataclass
class A :
__snake_case = "toto"
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = MixedTypeEnum(self.foo )
@dataclass
class A :
__snake_case = None
__snake_case = field(default=__A , metadata={'help': 'help message'} )
__snake_case = None
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[] )
@dataclass
class A :
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[1, 2, 3] )
__snake_case = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
__snake_case = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
__snake_case = field()
__snake_case = field()
__snake_case = field()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicEnum(self.required_enum )
@dataclass
class A :
__snake_case = 42
__snake_case = field()
__snake_case = None
__snake_case = field(default='toto' , metadata={'help': 'help message'} )
__snake_case = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
__snake_case = False
__snake_case = True
__snake_case = None
@dataclass
class A :
__snake_case = None
__snake_case = field(default=__A , metadata={'help': 'help message'} )
__snake_case = None
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[] )
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
self.assertEqual(len(a._actions ), len(b._actions ) )
for x, y in zip(a._actions, b._actions ):
lowerCAmelCase_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
lowerCAmelCase_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''', _UpperCamelCase ) and yy.get('''choices''', _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ), yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase, _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''', type=_UpperCamelCase, required=_UpperCamelCase )
expected.add_argument('''--bar''', type=_UpperCamelCase, required=_UpperCamelCase )
expected.add_argument('''--baz''', type=_UpperCamelCase, required=_UpperCamelCase )
expected.add_argument('''--flag''', type=_UpperCamelCase, default=_UpperCamelCase, const=_UpperCamelCase, nargs='''?''' )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
lowerCAmelCase_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((lowerCAmelCase_ ) , ) = parser.parse_args_into_dataclasses(_UpperCamelCase, look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''', default=42, type=_UpperCamelCase )
expected.add_argument('''--baz''', default='''toto''', type=_UpperCamelCase, help='''help message''' )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''', type=_UpperCamelCase, default=_UpperCamelCase, const=_UpperCamelCase, nargs='''?''' )
expected.add_argument('''--baz''', type=_UpperCamelCase, default=_UpperCamelCase, const=_UpperCamelCase, nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''', action='''store_false''', default=_UpperCamelCase, dest='''baz''' )
expected.add_argument('''--opt''', type=_UpperCamelCase, default=_UpperCamelCase )
lowerCAmelCase_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
lowerCAmelCase_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase, Namespace(foo=_UpperCamelCase, baz=_UpperCamelCase, opt=_UpperCamelCase ) )
lowerCAmelCase_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase, Namespace(foo=_UpperCamelCase, baz=_UpperCamelCase, opt=_UpperCamelCase ) )
lowerCAmelCase_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase, Namespace(foo=_UpperCamelCase, baz=_UpperCamelCase, opt=_UpperCamelCase ) )
lowerCAmelCase_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase, Namespace(foo=_UpperCamelCase, baz=_UpperCamelCase, opt=_UpperCamelCase ) )
lowerCAmelCase_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase, Namespace(foo=_UpperCamelCase, baz=_UpperCamelCase, opt=_UpperCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''', default='''toto''', choices=['''titi''', '''toto''', 42], type=make_choice_type_function(['''titi''', '''toto''', 42] ), )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
lowerCAmelCase_ = parser.parse_args([] )
self.assertEqual(args.foo, '''toto''' )
lowerCAmelCase_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.toto )
lowerCAmelCase_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo, '''titi''' )
lowerCAmelCase_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.titi )
lowerCAmelCase_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo, 42 )
lowerCAmelCase_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
@dataclass
class A :
__snake_case = "toto"
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''', default='''toto''', choices=('''titi''', '''toto''', 42), type=make_choice_type_function(['''titi''', '''toto''', 42] ), )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
lowerCAmelCase_ = parser.parse_args([] )
self.assertEqual(args.foo, '''toto''' )
lowerCAmelCase_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo, '''titi''' )
lowerCAmelCase_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo, 42 )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''', nargs='''+''', default=[], type=_UpperCamelCase )
expected.add_argument('''--bar_int''', nargs='''+''', default=[1, 2, 3], type=_UpperCamelCase )
expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=_UpperCamelCase )
expected.add_argument('''--foo_float''', nargs='''+''', default=[0.1, 0.2, 0.3], type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
lowerCAmelCase_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['''Hallo''', '''Bonjour''', '''Hello'''], foo_float=[0.1, 0.2, 0.3] ), )
lowerCAmelCase_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['''a''', '''b''', '''c'''], foo_float=[0.1, 0.7] ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''', default=_UpperCamelCase, type=_UpperCamelCase )
expected.add_argument('''--bar''', default=_UpperCamelCase, type=_UpperCamelCase, help='''help message''' )
expected.add_argument('''--baz''', default=_UpperCamelCase, type=_UpperCamelCase )
expected.add_argument('''--ces''', nargs='''+''', default=[], type=_UpperCamelCase )
expected.add_argument('''--des''', nargs='''+''', default=[], type=_UpperCamelCase )
lowerCAmelCase_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
lowerCAmelCase_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase, Namespace(foo=_UpperCamelCase, bar=_UpperCamelCase, baz=_UpperCamelCase, ces=[], des=[] ) )
lowerCAmelCase_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase, Namespace(foo=12, bar=3.14, baz='''42''', ces=['''a''', '''b''', '''c'''], des=[1, 2, 3] ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''', nargs='''+''', type=_UpperCamelCase, required=_UpperCamelCase )
expected.add_argument('''--required_str''', type=_UpperCamelCase, required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''', type=make_choice_type_function(['''titi''', '''toto'''] ), choices=['''titi''', '''toto'''], required=_UpperCamelCase, )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''', type=_UpperCamelCase, required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''', type=make_choice_type_function(['''titi''', '''toto'''] ), choices=['''titi''', '''toto'''], required=_UpperCamelCase, )
expected.add_argument('''--opt''', type=_UpperCamelCase, default=_UpperCamelCase )
expected.add_argument('''--baz''', default='''toto''', type=_UpperCamelCase, help='''help message''' )
expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase, _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
lowerCAmelCase_ = parser.parse_dict(_UpperCamelCase )[0]
lowerCAmelCase_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase, _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_UpperCamelCase, parser.parse_dict, _UpperCamelCase, allow_extra_keys=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = os.path.join(_UpperCamelCase, '''temp_json''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.json''', '''w+''' ) as f:
json.dump(_UpperCamelCase, _UpperCamelCase )
            lowerCAmelCase_ = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
lowerCAmelCase_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase, _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
lowerCAmelCase_ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = os.path.join(_UpperCamelCase, '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''', '''w+''' ) as f:
yaml.dump(_UpperCamelCase, _UpperCamelCase )
lowerCAmelCase_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
lowerCAmelCase_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase, _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
| 431 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "informer"
SCREAMING_SNAKE_CASE : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Dict , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "student_t" , _UpperCamelCase : str = "nll" , _UpperCamelCase : int = 1 , _UpperCamelCase : List[int] = None , _UpperCamelCase : Optional[Union[str, bool]] = "mean" , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : int = 6_4 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : bool = True , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.05 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 1_0_0 , _UpperCamelCase : float = 0.02 , _UpperCamelCase : Dict=True , _UpperCamelCase : str = "prob" , _UpperCamelCase : int = 5 , _UpperCamelCase : bool = True , **_UpperCamelCase : Optional[Any] , ) ->Optional[int]:
# time series specific configuration
snake_case_ = prediction_length
snake_case_ = context_length or prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = cardinality
else:
snake_case_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = embedding_dimension
else:
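            # common heuristic: embedding size grows with the category cardinality, capped at 50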
snake_case_ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Informer
snake_case_ = attention_type
snake_case_ = sampling_factor
snake_case_ = distil
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def snake_case__( self : Optional[Any] ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 39 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
a__ : Tuple = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=8 ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase__ ( __A ):
'''simple docstring'''
def __init__( self : Optional[Any] , a__ : UNetaDConditionModel , a__ : DDPMScheduler , a__ : VQModel , ):
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __snake_case ( self : Optional[int] , a__ : str , a__ : List[Any] , a__ : int , a__ : Union[str, Any] , a__ : str , a__ : Tuple ):
if latents is None:
UpperCAmelCase = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase = latents.to(_UpperCamelCase )
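        # the scheduler expects the initial latents scaled by its init_noise_sigma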
UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def __snake_case ( self : List[Any] , a__ : Union[str, Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __snake_case ( self : str , a__ : List[str]=0 ):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCAmelCase = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase, UpperCAmelCase = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self : List[str] ):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self : Any , a__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , a__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , a__ : int = 512 , a__ : int = 512 , a__ : int = 100 , a__ : float = 4.0 , a__ : int = 1 , a__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[str] = "pil" , a__ : bool = True , ):
UpperCAmelCase = self._execution_device
UpperCAmelCase = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase = self.scheduler.timesteps
UpperCAmelCase = self.unet.config.in_channels
UpperCAmelCase, UpperCAmelCase = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
# create initial latent
UpperCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase = {'''image_embeds''': image_embeds}
UpperCAmelCase = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
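                # the unet predicts noise and variance jointly: split them, apply guidance to
                # the noise halves only, and keep the text-conditioned variance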
UpperCAmelCase, UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase, UpperCAmelCase = noise_pred.chunk(2 )
UpperCAmelCase, UpperCAmelCase = variance_pred.chunk(2 )
UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase, UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase = image * 0.5 + 0.5
UpperCAmelCase = image.clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 51 |
import cmath
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
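    # complex apparent power: multiply the voltage and current phasors, each given as
    # (magnitude, angle in degrees) and converted to rectangular form below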
snake_case_ = math.radians(SCREAMING_SNAKE_CASE__ )
snake_case_ = math.radians(SCREAMING_SNAKE_CASE__ )
# Convert voltage and current to rectangular form
snake_case_ = cmath.rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = cmath.rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 | 0 |
import numpy as np
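# x * sigmoid(1.702 * x) below is the standard sigmoid-based approximation of the GELU activation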
def sigmoid ( snake_case__ : np.ndarray ):
    return 1 / (1 + np.exp(-snake_case__ ))
def sigmoid_linear_unit ( snake_case__ : np.ndarray ):
    return snake_case__ * sigmoid(1.702 * snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=1_3 , _UpperCamelCase : str=7 , _UpperCamelCase : int=True , _UpperCamelCase : Dict=True , _UpperCamelCase : int=False , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[int]=9_9 , _UpperCamelCase : str=3_2 , _UpperCamelCase : str=5 , _UpperCamelCase : str=4 , _UpperCamelCase : int=3_7 , _UpperCamelCase : int="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : str=5_1_2 , _UpperCamelCase : Optional[int]=1_6 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : Any=0.02 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : str=None , ) ->Dict:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def snake_case__( self : str ) ->List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : List[str] ) ->Tuple:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) ->Dict:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , ) ->Optional[int]:
snake_case_ = BioGptForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , *_UpperCamelCase : List[Any] ) ->Union[str, Any]:
snake_case_ = BioGptModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# create attention mask
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
snake_case_ = self.seq_length // 2
snake_case_ = 0
# first forward pass
snake_case_, snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase ).to_tuple()
# create hypothetical next token and extend to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case_ = ids_tensor((1,) , _UpperCamelCase ).item() + 1
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case_ = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_UpperCamelCase )] , dim=1 , )
# get two different outputs
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , past_key_values=_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , *_UpperCamelCase : List[Any] ) ->int:
snake_case_ = BioGptModel(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
snake_case_ = torch.ones(input_ids.shape , dtype=torch.long , device=_UpperCamelCase )
# first forward pass
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
snake_case_, snake_case_ = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )['''last_hidden_state''']
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=False ) ->Dict:
snake_case_ = BioGptForCausalLM(_UpperCamelCase )
model.to(_UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , *_UpperCamelCase : Dict ) ->Dict:
snake_case_ = BioGptModel(_UpperCamelCase )
snake_case_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , *_UpperCamelCase : List[str] ) ->int:
snake_case_ = self.num_labels
snake_case_ = BioGptForTokenClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = BioGptModelTester(self )
snake_case_ = ConfigTester(self , config_class=BioGptConfig , hidden_size=3_7 )
def snake_case__( self : str ) ->int:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCamelCase , gradient_checkpointing=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->Optional[Any]:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = '''left'''
# Define PAD Token = EOS Token
snake_case_ = tokenizer.eos_token
snake_case_ = model.config.eos_token_id
# use different length sentences to test batching
snake_case_ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''pt''' , padding=_UpperCamelCase )
snake_case_ = inputs['''input_ids'''].to(_UpperCamelCase )
snake_case_ = model.generate(
input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(_UpperCamelCase ) , )
snake_case_ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase )
snake_case_ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
snake_case_ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
snake_case_ = model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
snake_case_ = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__( self : Optional[int] ) ->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BioGptModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = '''multi_label_classification'''
snake_case_ = input_dict['''input_ids''']
snake_case_ = input_ids.ne(1 ).to(_UpperCamelCase )
snake_case_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ = BioGptForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : int ) ->Any:
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
snake_case_ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 4_2_3_8_4
snake_case_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_UpperCamelCase )
snake_case_ = model.generate(
**_UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCamelCase , )
snake_case_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCamelCase )
snake_case_ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
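The past-key-values checks in the tester above all follow one pattern: run the model once over the full sequence, then run it incrementally with a cache, and require the two results to agree on the newly added positions. A generic sketch of that check (a hypothetical helper, assuming `torch` is imported and the model is any Hugging Face causal LM that supports `use_cache`):
def check_kv_cache_consistency(model, input_ids, next_tokens, atol=1e-3):
# no-cache path: recompute attention over the whole concatenated sequence
full_ids = torch.cat([input_ids, next_tokens], dim=-1)
logits_no_past = model(full_ids).logits[:, -next_tokens.shape[1] :]
# cached path: reuse past_key_values and feed only the new tokens
past = model(input_ids, use_cache=True).past_key_values
logits_with_past = model(next_tokens, past_key_values=past).logits
return torch.allclose(logits_no_past, logits_with_past, atol=atol)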
def bead_sort(sequence: list ) -> list:
'''Bead sort ("gravity sort"): repeatedly let surplus beads fall onto the next rod.'''
if any(not isinstance(x , int ) or x < 0 for x in sequence ):
raise TypeError('Sequence must be list of non-negative integers' )
for _ in range(len(sequence ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 699 |
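Bead sort treats each value as a stack of beads on a rod; every inner pass lets surplus beads fall from a rod onto the shorter rod after it. A short trace reusing the bead_sort defined above:
# one outer pass over [3, 1, 2]:
# i=0: 3 > 1, move the surplus of 2 down -> [1, 3, 2]
# i=1: 3 > 2, move the surplus of 1 down -> [1, 2, 3]
# len(sequence) outer passes always suffice, giving O(n^2) time overall.
print(bead_sort([3, 1, 2])) # [1, 2, 3]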
def method_a(boundary , steps ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
h = (boundary[1] - boundary[0]) / steps
a = boundary[0]
b = boundary[1]
x_i = make_points(a , b , h )
y = 0.0
y += (h / 2.0) * f(a )
for i in x_i:
# print(i)
y += h * f(i )
y += (h / 2.0) * f(b )
return y
def make_points(a , b , h ):
x = a + h
while x < (b - h):
yield x
x = x + h
def f(x ): # enter your function here
y = (x - 0) * (x - 0)
return y
def main():
a = 0.0 # Lower bound of integration
b = 1.0 # Upper bound of integration
steps = 10.0 # define number of steps or resolution
boundary = [a, b] # define boundary of integration
y = method_a(boundary , steps )
print(F'''y = {y}''' )
if __name__ == "__main__":
main() | 39 | 0 |
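The shorthand comment int(f) = dx/2 * (f1 + 2f2 + ... + fn) above is the composite ("extended") trapezoidal rule; in standard notation, with n steps of width h = (b - a) / n,
\int_a^b f(x)\,dx \approx h\left[\tfrac{1}{2}f(a) + \sum_{i=1}^{n-1} f(a + ih) + \tfrac{1}{2}f(b)\right].
For f(x) = x^2 on [0, 1] with 10 steps this gives about 0.335 against the exact value 1/3. Note that the strict x < (b - h) comparison in make_points relies on floating-point accumulation to include the last interior point, so the result can vary slightly with other bounds or step counts.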
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase : List[Any] = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowercase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase : Optional[int] = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class , attributes , default_value , source_strings ):
'''simple docstring'''
attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"""config.{attribute}""" in modeling_source
or f"""getattr(config, \"{attribute}\"""" in modeling_source
or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
attribute_used = True
# Deal with multi-line cases
elif (
re.search(
rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , modeling_source , )
is not None
):
attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
attributes_to_allow = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
attributes_used_in_generation = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
case_allowed = True
if not attribute_used:
case_allowed = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
case_allowed = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
case_allowed = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
case_allowed = True
elif attribute.endswith("""_token_id""" ):
case_allowed = True
# configuration class specific cases
if not case_allowed:
allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used(config_class ):
'''simple docstring'''
signature = dict(inspect.signature(config_class.__init__ ).parameters )
parameter_names = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
parameter_defaults = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
reversed_attribute_map = {}
if len(config_class.attribute_map ) > 0:
reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
config_source_file = inspect.getsourcefile(config_class )
model_dir = os.path.dirname(config_source_file )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("""modeling_""" )]
# Get the source code strings
modeling_sources = []
for path in modeling_paths:
if os.path.isfile(path ):
with open(path ) as fp:
modeling_sources.append(fp.read() )
unused_attributes = []
for config_param, default_value in zip(parameter_names , parameter_defaults ):
# `attributes` here is all the variant names for `config_param`
attributes = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
unused_attributes.append(attributes[0] )
return sorted(unused_attributes )
def check_config_attributes():
'''simple docstring'''
configs_with_unused_attributes = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
config_classes_in_module = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda x: inspect.isclass(x )
and issubclass(x , PretrainedConfig )
and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
unused_attributes = check_config_attributes_being_used(config_class )
if len(unused_attributes ) > 0:
configs_with_unused_attributes[config_class.__name__] = unused_attributes
if len(configs_with_unused_attributes ) > 0:
error = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f"""{name}: {attributes}\n"""
raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 36 |
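The attribute checker above relies on plain substring and regex matching rather than parsing the AST. A minimal illustration of the substring checks (the one-line "modeling source" here is hypothetical):
source = 'hidden = getattr(config, "hidden_size", 64)'
attribute = 'hidden_size'
used = (
f'config.{attribute}' in source
or f'getattr(config, "{attribute}"' in source
or f'getattr(self.config, "{attribute}"' in source
)
print(used) # True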
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
lowerCAmelCase_ = Path(__file__).parent / '''model_card_template.md'''
lowerCAmelCase_ = uuida().hex
lowerCAmelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def http_user_agent(user_agent=None ):
ua = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(user_agent , dict ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(user_agent , str ):
ua += "; " + user_agent
return ua
def get_full_repo_name(model_id , organization=None , token=None ):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token )['''name''']
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
def create_model_card(args , model_name ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(args , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
hub_token = args.hub_token if hasattr(args , '''hub_token''' ) else None
repo_name = get_full_repo_name(model_name , token=hub_token )
model_card = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' ,
license='''apache-2.0''' ,
library_name='''diffusers''' ,
tags=[] ,
datasets=args.dataset_name ,
metrics=[] ,
) ,
template_path=MODEL_CARD_TEMPLATE_PATH ,
model_name=model_name ,
repo_name=repo_name ,
dataset_name=args.dataset_name if hasattr(args , '''dataset_name''' ) else None ,
learning_rate=args.learning_rate ,
train_batch_size=args.train_batch_size ,
eval_batch_size=args.eval_batch_size ,
gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''' ) else None
) ,
adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''' ) else None ,
adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''' ) else None ,
adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''' ) else None ,
adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''' ) else None ,
lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''' ) else None ,
lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''' ) else None ,
ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''' ) else None ,
ema_power=args.ema_power if hasattr(args , '''ema_power''' ) else None ,
ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''' ) else None ,
mixed_precision=args.mixed_precision ,
)
card_path = os.path.join(args.output_dir , '''README.md''' )
model_card.save(card_path )
def extract_commit_hash(resolved_file , commit_hash=None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
resolved_file = str(Path(resolved_file ).as_posix() )
search = re.search(R'''snapshots/([^/]+)/''' , resolved_file )
if search is None:
return None
commit_hash = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_ = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
lowerCAmelCase_ = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir=None , new_cache_dir=None ):
if new_cache_dir is None:
new_cache_dir = DIFFUSERS_CACHE
if old_cache_dir is None:
old_cache_dir = old_diffusers_cache # the legacy cache path computed above
old_cache_dir = Path(old_cache_dir ).expanduser()
new_cache_dir = Path(new_cache_dir ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
new_blob_path.parent.mkdir(parents=True , exist_ok=True )
os.replace(old_blob_path , new_blob_path )
try:
os.symlink(new_blob_path , old_blob_path )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
lowerCAmelCase_ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase_ = int(f.read())
except ValueError:
lowerCAmelCase_ = 0
if cache_version < 1:
lowerCAmelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
lowerCAmelCase_ = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name , variant=None ):
if variant is not None:
splits = weights_name.split('''.''' )
splits = splits[:-1] + [variant] + splits[-1:]
weights_name = '''.'''.join(splits )
return weights_name
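# A quick sketch of the variant naming scheme implemented by _add_variant above:
# _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
# _add_variant("model.safetensors", "ema") -> "model.ema.safetensors"
# _add_variant("model.safetensors", None) -> "model.safetensors" (unchanged)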
def _get_model_file(
pretrained_model_name_or_path , * ,
weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
pretrained_model_name_or_path = str(pretrained_model_name_or_path )
if os.path.isfile(pretrained_model_name_or_path ):
return pretrained_model_name_or_path
elif os.path.isdir(pretrained_model_name_or_path ):
if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
# Load from a PyTorch checkpoint
model_file = os.path.join(pretrained_model_name_or_path , weights_name )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
model_file = hf_hub_download(
pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , FutureWarning , )
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.''' , FutureWarning , )
try:
# 2. Load model file as usual
model_file = hf_hub_download(
pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' ) | 39 | 0 |
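extract_commit_hash above recovers the revision from a hub cache path by pattern matching, not by querying the hub. A small sketch with an illustrative (made-up) snapshot path:
import re

cached = 'models--runwayml--stable-diffusion-v1-5/snapshots/' + 'a' * 40 + '/unet/config.json'
match = re.search(r'snapshots/([^/]+)/', cached)
print(match.groups()[0]) # the 40-character segment between 'snapshots/' and the next '/'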
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase_ = logging.getLogger()
def get_setup_file() -> str:
parser = argparse.ArgumentParser()
parser.add_argument('''-f''' )
args = parser.parse_args()
return args.f
def get_results(output_dir ) -> dict:
results = {}
path = os.path.join(output_dir , '''all_results.json''' )
if os.path.exists(path ):
with open(path , '''r''' ) as f:
results = json.load(f )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
def is_cuda_and_apex_available() -> bool:
is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
UpperCamelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __SCREAMING_SNAKE_CASE ( TestCasePlus ):
@classmethod
def lowerCamelCase_ ( cls : Tuple ):
'''simple docstring'''
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
lowercase : List[Any] =tempfile.mkdtemp()
lowercase : List[Any] =os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase : int =['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls : List[Any] ):
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_auto_remove_tmp_dir()
lowercase : List[Any] =F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase : Optional[Any] =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =self.get_auto_remove_tmp_dir()
lowercase : Any =F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase : str =get_results(_UpperCamelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.get_auto_remove_tmp_dir()
lowercase : List[Any] =F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : List[str] =get_results(_UpperCamelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowercase : Tuple =7 if get_gpu_count() > 1 else 2
lowercase : int =self.get_auto_remove_tmp_dir()
lowercase : List[Any] =F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : int =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[str] =self.get_auto_remove_tmp_dir()
lowercase : Any =F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : Any =get_results(_UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Tuple =self.get_auto_remove_tmp_dir()
lowercase : int =F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : str =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.get_auto_remove_tmp_dir()
lowercase : Optional[int] =F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : Dict =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any =self.get_auto_remove_tmp_dir()
lowercase : Optional[int] =F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : str =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] =logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
lowercase : List[Any] =self.get_auto_remove_tmp_dir()
lowercase : List[Any] =F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
lowercase : Optional[Any] =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.get_auto_remove_tmp_dir()
lowercase : Optional[int] =F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase : Dict =get_results(_UpperCamelCase )
# The base model scores 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''image_classification_no_trainer''' ) ) )
| 92 |
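Each test above follows the same recipe: build an argv list for one example script, run it through the accelerate launcher, then read metrics back from disk. Schematically (paths and thresholds below are illustrative):
# testargs = f'{examples_dir}/pytorch/.../run_x_no_trainer.py --output_dir {tmp_dir} ...'.split()
# run_command(self._launch_args + testargs) # spawns `accelerate launch <script> ...`
# result = get_results(tmp_dir) # parses <tmp_dir>/all_results.json
# self.assertGreaterEqual(result['eval_accuracy'], 0.75)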
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(backbone_config , dict ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(backbone_config , PretrainedConfig ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
output = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
output["backbone_config"] = self.backbone_config.to_dict()
output["model_type"] = self.__class__.model_type
return output | 39 | 0 |
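The branching above means a hybrid DPT model always carries a nested BiT backbone config, which round-trips through to_dict(). A minimal sketch using the upstream transformers class that this snippet mirrors (assumes transformers is installed):
from transformers import DPTConfig

config = DPTConfig(is_hybrid=True) # backbone_config falls back to the BiT defaults above
print(type(config.backbone_config).__name__) # BitConfig
print(config.to_dict()['backbone_config']['layer_type']) # 'bottleneck'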
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
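# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the test suite above). It simply
# mirrors the first slow integration test: the checkpoint name comes from that
# test, everything else is standard PyTorch/transformers API.
#
#     model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#     input_ids = torch.arange(256).unsqueeze(0)  # toy batch of token ids
#     with torch.no_grad():
#         hidden_states = model(input_ids)[0]
#     print(hidden_states.shape)  # torch.Size([1, 256, 768])
# ---------------------------------------------------------------------------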
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."})
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."})
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."})


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."})
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."})
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Number of self-training iterations."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
# Sanity checks
snake_case_ = {}
snake_case_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case_ = args.train_file
snake_case_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case_ = args.eval_file
for key in data_files:
snake_case_ = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
snake_case_ = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
snake_case_ = F'''{args.output_dir}/self-train_iter-{{}}'''.format
snake_case_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = None
snake_case_ = None
snake_case_ = 0
snake_case_ = False
# Show the progress bar
snake_case_ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case_ = data_dir_format(SCREAMING_SNAKE_CASE__ )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-1''' )
snake_case_ = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
arguments_dict.update({key: value} )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-2''' )
# Update arguments_dict
snake_case_ = model_path
snake_case_ = data_files['''train''']
snake_case_ = current_output_dir
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ )
snake_case_ = iteration
snake_case_ = data_dir_format(iteration + 1 )
snake_case_ = AutoConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' ) )
snake_case_ = config.idalabel
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-checkpoint.json''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
snake_case_ = float(json.load(SCREAMING_SNAKE_CASE__ )[args.eval_metric] )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Loading the dataset from local csv or json files.
snake_case_ = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
snake_case_ = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case_ = eval_result
if best_iteration is None:
snake_case_ = new_iteration
snake_case_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case_ = new_iteration
snake_case_ = new_eval_result
snake_case_ = 0
else:
if new_eval_result == best_eval_result:
snake_case_ = new_iteration
snake_case_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , SCREAMING_SNAKE_CASE__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
            os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , )
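# ---------------------------------------------------------------------------
# Illustrative invocation sketch for the entry point above. All paths and the
# base model are placeholders; extra keyword arguments are forwarded into the
# training arguments whenever the attribute exists (see the kwargs loop in
# selftrain above).
#
#     selftrain(
#         model_name_or_path="bert-base-uncased",
#         train_file="data/train.csv",
#         infer_file="data/infer.csv",
#         output_dir="outputs/self-training",
#         eval_file="data/eval.csv",
#         evaluation_strategy="epoch",
#         max_selftrain_iterations=3,
#     )
# ---------------------------------------------------------------------------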
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
return inputs_dict
    class TFMobileBertModelTester(object):
'''simple docstring'''
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
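# ---------------------------------------------------------------------------
# Illustrative usage sketch mirroring the slow integration test above; the
# checkpoint name comes from that test, the rest is standard TF/transformers.
#
#     model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
#     input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
#     prediction_logits = model(input_ids)[0]  # shape (1, 6, 30522)
# ---------------------------------------------------------------------------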
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AltDiffusionPipeline
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
def snake_case__( self : Dict ) ->List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__( self : List[str] ) ->Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__( self : Dict ) ->Any:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = '''A photo of an astronaut'''
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ = RobertaSeriesModelWithTransformation(_UpperCamelCase )
snake_case_ = text_encoder
snake_case_ = AltDiffusionPipeline(**_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = alt_pipe(**_UpperCamelCase )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : int ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : List[str] ) ->Tuple:
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
snake_case_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
snake_case_ = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe([prompt] , generator=_UpperCamelCase , num_inference_steps=2 , output_type='''numpy''' )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
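# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumes a CUDA device and the public
# BAAI/AltDiffusion checkpoint used by the slow tests above; the prompt is a
# placeholder):
#
#     pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#     pipe = pipe.to("cuda")
#     generator = torch.manual_seed(0)
#     image = pipe(
#         "A painting of a squirrel eating a burger",
#         generator=generator,
#         num_inference_steps=20,
#     ).images[0]  # a 512x512 PIL image
# ---------------------------------------------------------------------------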
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_flip_channel_order' ) )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCamelCase : Union[str, Any] = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCamelCase : Tuple = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
_lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCamelCase : Tuple = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
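# ---------------------------------------------------------------------------
# Illustrative usage sketch for the image processor exercised above. The
# checkpoint name is an assumption -- any MobileViT checkpoint that ships a
# preprocessing config would work the same way.
#
#     from transformers import MobileViTImageProcessor
#     processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#     pixel_values = processor(images=Image.open("cat.jpg"), return_tensors="pt").pixel_values
#     # pixel_values.shape -> (1, 3, crop_height, crop_width)
# ---------------------------------------------------------------------------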
from math import factorial
def combinations(n, k):
    # If either of the conditions is true, the function is being asked
    # to calculate the factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
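# ---------------------------------------------------------------------------
# Sanity-check sketch: the closed form n! / (k! * (n - k)!) implemented above
# should agree with the standard library (math.comb, Python >= 3.8):
#
#     import math
#     assert combinations(52, 5) == math.comb(52, 5) == 2_598_960
#     assert combinations(40, 4) == math.comb(40, 4) == 91_390
#     assert combinations(10, 3) == math.comb(10, 3) == 120
# ---------------------------------------------------------------------------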
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(F'''.{module_name}''', "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''')

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
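
# A minimal usage sketch of the class above. The checkpoint name is purely
# illustrative and assumes network access:
#
#   from transformers import AutoFeatureExtractor
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   # Custom (config, feature extractor) pairs can also be registered:
#   # AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)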
| 624 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='''toto''', metadata={'''help''': '''help message'''})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = '''titi'''
    toto = '''toto'''


class MixedTypeEnum(Enum):
    titi = '''titi'''
    toto = '''toto'''
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'''help''': '''help message'''})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='''toto''', metadata={'''help''': '''help message'''})
    foo_str: "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'''help''': '''help message'''})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
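
# A quick sketch of how these dataclasses are consumed outside the tests
# (hedged example; uses only names defined above):
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#   )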
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y).items() if k != '''container'''}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''', None) and yy.get('''choices''', None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice), yy['''type'''](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=int, required=True)
        expected.add_argument('''--bar''', type=float, required=True)
        expected.add_argument('''--baz''', type=str, required=True)
        expected.add_argument('''--flag''', type=string_to_bool, default=False, const=True, nargs='''?''')
        self.argparsersEqual(parser, expected)

        args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', default=42, type=int)
        expected.add_argument('''--baz''', default='''toto''', type=str, help='''help message''')
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=string_to_bool, default=False, const=True, nargs='''?''')
        expected.add_argument('''--baz''', type=string_to_bool, default=True, const=True, nargs='''?''')
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''', action='''store_false''', default=False, dest='''baz''')
        expected.add_argument('''--opt''', type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(['''--foo''', '''--no_baz'''])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(['''--foo''', '''--baz'''])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''',
            default='''toto''',
            choices=['''titi''', '''toto''', 42],
            type=make_choice_type_function(['''titi''', '''toto''', 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, '''toto''')
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(['''--foo''', '''titi'''])
        self.assertEqual(args.foo, '''titi''')
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(['''--foo''', '''42'''])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''42'''])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''',
            default='''toto''',
            choices=('''titi''', '''toto''', 42),
            type=make_choice_type_function(['''titi''', '''toto''', 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, '''toto''')

        args = parser.parse_args(['''--foo''', '''titi'''])
        self.assertEqual(args.foo, '''titi''')

        args = parser.parse_args(['''--foo''', '''42'''])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''', nargs='''+''', default=[], type=int)
        expected.add_argument('''--bar_int''', nargs='''+''', default=[1, 2, 3], type=int)
        expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=str)
        expected.add_argument('''--foo_float''', nargs='''+''', default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['''Hallo''', '''Bonjour''', '''Hello'''], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['''a''', '''b''', '''c'''], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', default=None, type=int)
        expected.add_argument('''--bar''', default=None, type=float, help='''help message''')
        expected.add_argument('''--baz''', default=None, type=str)
        expected.add_argument('''--ces''', nargs='''+''', default=[], type=str)
        expected.add_argument('''--des''', nargs='''+''', default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz='''42''', ces=['''a''', '''b''', '''c'''], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('''--required_list''', nargs='''+''', type=int, required=True)
        expected.add_argument('''--required_str''', type=str, required=True)
        expected.add_argument(
            '''--required_enum''',
            type=make_choice_type_function(['''titi''', '''toto''']),
            choices=['''titi''', '''toto'''],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=int, required=True)
        expected.add_argument(
            '''--required_enum''',
            type=make_choice_type_function(['''titi''', '''toto''']),
            choices=['''titi''', '''toto'''],
            required=True,
        )
        expected.add_argument('''--opt''', type=string_to_bool, default=None)
        expected.add_argument('''--baz''', default='''toto''', type=str, help='''help message''')
        expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, '''temp_json''')
            os.mkdir(temp_local_path)
            with open(temp_local_path + '''.json''', '''w+''') as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.json'''))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, '''temp_yaml''')
            os.mkdir(temp_local_path)
            with open(temp_local_path + '''.yaml''', '''w+''') as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.yaml'''))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser) | 39 | 0 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """simple docstring"""
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''')
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = '0094702343221'
    print(is_sri_lankan_phone_number(phone))
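    # A couple of extra illustrative checks (synthetic numbers):
    assert is_sri_lankan_phone_number("+94773283048")  # "+94" prefix with a valid mobile digit
    assert not is_sri_lankan_phone_number("0912345678")  # not an 07x mobile prefix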
| 404 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 39 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
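
# Illustrative rename using the DECODER_PATTERNS defined above (hypothetical key):
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"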
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''')}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''')}

    for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion'''):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value''']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion'''):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value''']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            '''final_logits_bias''',
            '''model.encoder.embed_tokens.weight''',
            '''model.decoder.embed_tokens.weight''',
            '''lm_head.weight''',
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars, desc='''converting tf checkpoint to dict'''):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
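    # Example invocation (a sketch; the script name and paths are hypothetical):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py \
    #       --tf_ckpt_path /path/to/tf_ckpt --save_dir /path/to/pytorch_model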
| 431 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | 39 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
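    # With this pattern, importing the package is cheap: only `_import_structure`
    # is built eagerly, and the torch-backed `modeling_trocr` module is loaded
    # lazily by `_LazyModule` on first attribute access.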
| 51 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, '''rb''') as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''')
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''')
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''')
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
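
# `safemembers` above filters out entries whose resolved paths or link targets
# escape the extraction directory, mitigating tar path-traversal attacks
# (cf. CVE-2007-4559).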
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, '''rb''') as gzip_file:
            with open(output_path, '''wb''') as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, '''rb''') as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, '''r''') as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, '''wb''') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError('''Please pip install rarfile''')
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('''Please pip install zstandard''')
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, '''rb''') as ifh, open(output_path, '''wb''') as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, '''rb''') as compressed_file:
            with open(output_path, '''wb''') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('''Please pip install py7zr''')
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, '''r''') as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError('''Please pip install lz4''')
        import lz4.frame

        with lz4.frame.open(input_path, '''rb''') as compressed_file:
            with open(output_path, '''wb''') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
            '''Use \'infer_extractor_format\' instead.''',
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated", ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix('''.lock'''))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
                        '''Use \'extractor_format\' instead.''',
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != '''deprecated''' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
                    '''exception in 3.0.0.''',
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path) | 39 | 0 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
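# Sanity check: the DP result should agree with math.comb; C(10, 5) = 252.
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252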
| 643 |
def bead_sort(sequence):
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('''Sequence must be list of non-negative integers''')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 39 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    ar = f'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio


def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = 'f32le', ):
    ar = f'{sampling_rate}'
    ac = '1'
    if format_for_conversion == 's16le':
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')

    system = platform.system()
    if system == 'Linux':
        format_ = 'alsa'
        input_ = 'default'
    elif system == 'Darwin':
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == 'Windows':
        format_ = 'dshow'
        input_ = 'default'

    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = 'f32le', ):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == 's16le':
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}')
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {'raw': acc[:chunk_len], 'stride': stride, 'partial': True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
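
# A small, self-contained illustration of the chunking logic above
# (pure bytes, no ffmpeg required):
#   list(chunk_bytes_iter(iter([b"abcdefgh"]), chunk_len=4, stride=(1, 1)))
#   -> [{'raw': b'abcd', 'stride': (0, 1)},
#       {'raw': b'cdef', 'stride': (1, 1)},
#       {'raw': b'efgh', 'stride': (1, 1)},
#       {'raw': b'gh', 'stride': (1, 0)}]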
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b'':
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
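
# Usage sketch (hypothetical file path; requires the `ffmpeg` binary):
#   with open("sample.wav", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)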
| 699 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('''<n>''', '''''', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x)) | 39 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, """models/bert/"""))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, """src/transformers/models/bert/modeling_bert.py"""),
            os.path.join(self.transformer_dir, """models/bert/modeling_bert.py"""),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = """src/transformers"""
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, """new_code.py""")
        with open(fname, """w""", newline="""\n""") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, """r""") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""",
            """BertLMPredictionHead""",
            REFERENCE_CODE + """\n""",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""",
            """BertLMPredictionHead""",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""",
            """TestModelLMPredictionHead""",
            re.sub("""Bert""", """TestModel""", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
        self.check_copy_consistency(
            F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""",
            F"""{long_class_name}LMPredictionHead""",
            re.sub("""Bert""", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""",
            """TestModelLMPredictionHead""",
            REFERENCE_CODE,
            overwrite_result=re.sub("""Bert""", """TestModel""", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
snake_case : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
snake_case , snake_case : Dict = check_copies.convert_to_localized_md(
_UpperCamelCase ,_UpperCamelCase ,localized_readme["""format_model_list"""] )
self.assertFalse(_UpperCamelCase )
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
snake_case , snake_case : Tuple = check_copies.convert_to_localized_md(
_UpperCamelCase ,_UpperCamelCase ,localized_readme["""format_model_list"""] )
        # Check whether the number of models is equal to that in README.md after conversion.
self.assertTrue(_UpperCamelCase )
snake_case : int = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
snake_case : List[str] = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case , snake_case : Tuple = check_copies.convert_to_localized_md(
_UpperCamelCase ,_UpperCamelCase ,localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
| 36 |
def binomial_coefficient(n , r ):
    # Dynamic-programming Pascal's-triangle row: c[j] holds C(i, j) for the current i.
    c = [0 for i in range(r + 1 )]
    # nC0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5)) | 39 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class __SCREAMING_SNAKE_CASE ( __A ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = ["input_ids", "attention_mask"]
lowerCamelCase_ = None
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : Optional[int]="<s>" , UpperCAmelCase__ : Dict="</s>" , UpperCAmelCase__ : Dict="<pad>" , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Any=False , **UpperCAmelCase__ : Tuple , ):
'''simple docstring'''
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase , **_UpperCamelCase , )
lowercase : str =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _UpperCamelCase ) != add_prefix_space:
lowercase : Optional[Any] =getattr(_UpperCamelCase , pre_tok_state.pop('''type''' ) )
lowercase : Dict =add_prefix_space
lowercase : List[str] =pre_tok_class(**_UpperCamelCase )
lowercase : List[str] =add_prefix_space
def lowerCamelCase_ ( self : Union[str, Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : List[Any] =kwargs.get('''is_split_into_words''' , _UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def lowerCamelCase_ ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : List[Any] =kwargs.get('''is_split_into_words''' , _UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
lowercase : List[Any] =self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ):
'''simple docstring'''
lowercase : str =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
lowercase : int =input_ids[-self.model_max_length :]
return input_ids
| 92 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
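# Map an (alpha, sigma) noise level pair back to a continuous diffusion timestep via atan2.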
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return torch.atana(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / math.pi * 2
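# Noise schedule used by the original sampler: sigma follows a squared-sine ramp over t.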
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.sin(t * math.pi / 2 ) ** 2
snake_case_ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class snake_case_ ( __A ):
'''simple docstring'''
pass
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int ) ->Optional[int]:
super().__init__()
snake_case_ = DiffusionAttnUnetaD(_UpperCamelCase , n_attn_layers=4 )
snake_case_ = deepcopy(self.diffusion )
snake_case_ = torch.quasirandom.SobolEngine(1 , scramble=_UpperCamelCase )
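# Download the requested checkpoint from the model server into the working directory.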
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
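# The dicts below map the original model's numbered sub-blocks onto diffusers layer names
# (down/up/mid blocks, depth-0 layers, and per-block resnet/attention parameter renames).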
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
for key, value in ATTN_MAP.items():
if name.startswith(SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif name.startswith(SCREAMING_SNAKE_CASE__ ):
return [name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
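# Translate one flat parameter name from the original checkpoint into the diffusers naming scheme.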
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 ):
snake_case_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
snake_case_ = 0
if string.startswith('''net.3.''' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('''net.''' ):
snake_case_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('''main.''' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = '''mid_block'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - 1}''' if int(SCREAMING_SNAKE_CASE__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(SCREAMING_SNAKE_CASE__ )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(SCREAMING_SNAKE_CASE__ )
snake_case_ = new_string_left
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
snake_case_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
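# Rebuild the whole state dict with diffusers-style keys, skipping untrainable resampling kernels.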
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
            # up- and downsample layers don't have trainable weights
continue
snake_case_ = rename(SCREAMING_SNAKE_CASE__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = transform_conv_attns(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = v
return new_state_dict
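# Attention projections are stored as 1x1 convolutions in the original model: squeeze them to
# linear weights and split the fused QKV matrix into separate query/key/value tensors.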
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
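# End-to-end conversion: load the original checkpoint, rename its weights into the diffusers
# UNet, then sample with both pipelines and assert the generated audio matches.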
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(SCREAMING_SNAKE_CASE__ )
snake_case_ = MODELS_MAP[model_name]['''sample_rate''']
snake_case_ = MODELS_MAP[model_name]['''sample_size''']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
snake_case_ = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )['''state_dict'''] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(SCREAMING_SNAKE_CASE__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
snake_case_ = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
snake_case_ = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
snake_case_ = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , SCREAMING_SNAKE_CASE__ )
print('''Diff max''' , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase_ = parser.parse_args()
main(args) | 39 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
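# Scrape the first Amazon.in results page for a product and collect each listing into a DataFrame.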
def _a ( _lowerCamelCase = "laptop" ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = F'''https://www.amazon.in/laptop/s?k={product}'''
__snake_case : List[Any] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
__snake_case : Dict = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ).text )
# Initialize a Pandas dataframe with the column titles
__snake_case : Dict = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
__snake_case : Any = item.ha.text
__snake_case : Optional[int] = """https://www.amazon.in/""" + item.ha.a["""href"""]
__snake_case : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
__snake_case : List[Any] = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
__snake_case : Any = """Not available"""
try:
__snake_case : Optional[int] = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
__snake_case : List[Any] = """"""
try:
__snake_case : List[Any] = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 100 )
except ValueError:
__snake_case : int = float("""nan""" )
except AttributeError:
pass
__snake_case : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__snake_case : List[Any] = """ """
__snake_case : Optional[Any] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCamelCase = "headphones"
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 39 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
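# Processor pairing a BridgeTower image processor with a Roberta tokenizer for joint text/image inputs.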
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = ["image_processor", "tokenizer"]
_lowercase = "BridgeTowerImageProcessor"
_lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Any =self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
SCREAMING_SNAKE_CASE_ : Any =self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def __lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def __lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : str =self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ : int =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 220 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non-filtered idx as noted above
snake_case_ = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(_UpperCamelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('''inf''' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(_UpperCamelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-12 )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase , __A ):
'''simple docstring'''
if is_tf_available():
SCREAMING_SNAKE_CASE : Optional[int] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def snake_case__( self : List[Any] ) ->Optional[int]:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 2
snake_case_ = 2
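        # tf.Module wrapper exposing generate() under a fixed input signature for SavedModel export.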
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->List[Any]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) ->List[Any]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_0_2, 1_0_3]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for batch_size in range(1 , len(_UpperCamelCase ) + 1 ):
snake_case_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
def snake_case__( self : List[str] ) ->int:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 1
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Any ) ->List[str]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) ->Optional[int]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2], [1_0_2, 1_0_3]]
snake_case_ = [[1], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for input_row in range(len(_UpperCamelCase ) ):
snake_case_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
@slow
@require_tensorflow_text
def snake_case__( self : Optional[Any] ) ->List[Any]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_UpperCamelCase )
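        # Keras layer bundling a SentencePiece tokenizer and a seq2seq LM into one text-to-text op.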
class snake_case_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ) ->List[Any]:
super().__init__()
snake_case_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_UpperCamelCase , '''spiece.model''' ) , '''rb''' ).read() )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : str ) ->List[Any]:
snake_case_ = self.tokenizer.tokenize(_UpperCamelCase )
snake_case_, snake_case_ = text.pad_model_inputs(
_UpperCamelCase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
snake_case_ = self.model.generate(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
return self.tokenizer.detokenize(_UpperCamelCase )
snake_case_ = CompleteSentenceTransformer()
snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
snake_case_ = complete_model(_UpperCamelCase )
snake_case_ = tf.keras.Model(_UpperCamelCase , _UpperCamelCase )
keras_model.save(_UpperCamelCase )
def snake_case__( self : Any ) ->List[Any]:
# Has PT equivalent: this test relies on random sampling
snake_case_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
snake_case_ = 1_4
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = '''Hello, my dog is cute and'''
snake_case_ = tokenizer(_UpperCamelCase , return_tensors='''tf''' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def snake_case__( self : str ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = '''Hugging Face is a technology company based in New York and Paris.'''
snake_case_ = bart_tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
snake_case_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Tuple=None , **_UpperCamelCase : Optional[int] ) ->List[str]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
snake_case_ = bart_model.generate(_UpperCamelCase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_UpperCamelCase , _UpperCamelCase ) )
class snake_case_ ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , **_UpperCamelCase : Tuple ) ->Optional[Any]:
return super().call(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case_ = bart_model.generate(_UpperCamelCase ).numpy()
with self.assertRaises(_UpperCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_UpperCamelCase , foo='''bar''' ) | 39 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A_:
"""simple docstring"""
def __init__( self , A , ):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : List[Any] = 13
_lowerCamelCase : Any = 7
_lowerCamelCase : Tuple = True
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Dict = False
_lowerCamelCase : int = True
_lowerCamelCase : int = 99
_lowerCamelCase : Any = 32
_lowerCamelCase : Tuple = 2
_lowerCamelCase : Union[str, Any] = 4
_lowerCamelCase : List[str] = 37
_lowerCamelCase : Any = 'gelu'
_lowerCamelCase : Optional[Any] = 0.1
_lowerCamelCase : Dict = 0.1
_lowerCamelCase : Optional[int] = 512
_lowerCamelCase : Optional[Any] = 16
_lowerCamelCase : Union[str, Any] = 2
_lowerCamelCase : Union[str, Any] = 0.0_2
_lowerCamelCase : Dict = 3
_lowerCamelCase : Optional[int] = 4
_lowerCamelCase : List[str] = None
def _lowerCAmelCase ( self ):
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : int = None
if self.use_input_mask:
_lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : str = None
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self , A , A , A , A , A , A ):
_lowerCamelCase : str = TFDistilBertModel(config=_UpperCamelCase )
_lowerCamelCase : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCamelCase : Tuple = model(_UpperCamelCase )
_lowerCamelCase : Union[str, Any] = [input_ids, input_mask]
_lowerCamelCase : Dict = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , A , A , A , A , A , A ):
_lowerCamelCase : int = TFDistilBertForMaskedLM(config=_UpperCamelCase )
_lowerCamelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCamelCase : Tuple = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , A , A , A , A , A , A ):
_lowerCamelCase : Dict = TFDistilBertForQuestionAnswering(config=_UpperCamelCase )
_lowerCamelCase : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
_lowerCamelCase : List[Any] = model(_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self , A , A , A , A , A , A ):
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : str = TFDistilBertForSequenceClassification(_UpperCamelCase )
_lowerCamelCase : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCamelCase : Optional[int] = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self , A , A , A , A , A , A ):
_lowerCamelCase : Union[str, Any] = self.num_choices
_lowerCamelCase : Optional[Any] = TFDistilBertForMultipleChoice(_UpperCamelCase )
_lowerCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : Dict = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : List[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
_lowerCamelCase : Optional[Any] = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self , A , A , A , A , A , A ):
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : Union[str, Any] = TFDistilBertForTokenClassification(_UpperCamelCase )
_lowerCamelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCamelCase : List[str] = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) : List[Any] = config_and_inputs
_lowerCamelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A_(__A , __A , unittest.TestCase ):
"""simple docstring"""
a_ : int = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ : List[Any] = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Dict = False
a_ : Optional[Any] = False
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = TFDistilBertModelTester(self )
_lowerCamelCase : int = ConfigTester(self , config_class=_UpperCamelCase , dim=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_UpperCamelCase )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_UpperCamelCase )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_UpperCamelCase )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_UpperCamelCase )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_UpperCamelCase )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_UpperCamelCase )
@slow
def _lowerCAmelCase ( self ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
_lowerCamelCase : List[Any] = TFDistilBertModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_tf
class A_(unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self ):
_lowerCamelCase : Union[str, Any] = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
_lowerCamelCase : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCamelCase : int = model(_UpperCamelCase )[0]
_lowerCamelCase : Any = [1, 6, 768]
self.assertEqual(output.shape , _UpperCamelCase )
_lowerCamelCase : List[Any] = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 )
| 437 |
import unittest
from transformers import DonutProcessor
lowerCAmelCase_ = '''naver-clova-ix/donut-base'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = DonutProcessor.from_pretrained(_UpperCamelCase )
def snake_case__( self : Dict ) ->str:
snake_case_ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
snake_case_ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
snake_case_ = self.processor.tokenajson(_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , _UpperCamelCase ) | 39 | 0 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCamelCase__ = datasets.logging.get_logger(__name__)
lowerCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n"
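# Parse the key/system CoNLL lines of one document and align mentions between both cluster sets.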
# NOTE: `reader`, `util` and `evaluator` come from the `coval` package, while
# `datasets`, `logger`, `_CITATION` and `_DESCRIPTION` are expected from the
# imports and constants defined above this excerpt.
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively")
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
                else:
                    break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
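
if __name__ == "__main__":
    # Smoke test of the non-default evaluation modes documented in the kwargs
    # description above; the `words` lines are copied from that docstring
    # example. This guard is an illustration added here, not part of the
    # original metric file, and assumes the `coval` package is installed.
    words = [
        "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -",
        "bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)",
        "bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)",
        "bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -",
        "bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -",
        "bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -",
    ]
    coval = datasets.load_metric("coval")
    results = coval.compute(predictions=[words], references=[words], keep_singletons=False)
    print(results["conll_score"])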
| 624 |
from __future__ import annotations

def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 39 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longformer'] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_longformer'] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
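
    # A quick sketch of what the lazy structure above buys (kept as a comment,
    # since an __init__ like this should not execute model code itself): heavy
    # submodules are imported only when one of their names is first accessed.
    #
    #     from transformers import LongformerConfig, LongformerModel
    #
    #     config = LongformerConfig(attention_window=128)  # loads configuration_longformer
    #     model = LongformerModel(config)                  # loads modeling_longformer only here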
| 404 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 39 | 0 |
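
# A minimal, self-contained sketch of the `debug_launcher` pattern the tests
# above exercise (assumes only that `accelerate` is installed; the demo
# function is illustrative and not part of the test suite):
from accelerate import Accelerator, debug_launcher


def _demo():
    accelerator = Accelerator()
    accelerator.print(f"launched {accelerator.num_processes} CPU processes")


if __name__ == "__main__":
    debug_launcher(_demo, num_processes=2)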
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart'''] = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart_fast'''] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mbart'''] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mbart'''] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mbart'''] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
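
    # Usage sketch for the names exported above (kept as a comment, since an
    # __init__ like this should not run model code; the checkpoint name is an
    # assumption for illustration):
    #
    #     from transformers import MBartForConditionalGeneration, MBartTokenizer
    #
    #     tok = MBartTokenizer.from_pretrained(
    #         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    #     )
    #     model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")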
| 431 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 39 | 0 |
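
# A small usage sketch for the `InformerConfig` defined above (values are
# illustrative assumptions; `feature_size` is derived from the inputs, not
# passed in):
config = InformerConfig(
    prediction_length=24,
    context_length=48,
    num_time_features=2,
    lags_sequence=[1, 2, 3],
)
print(config.feature_size)  # input_size * len(lags_sequence) + _number_of_features = 7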
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the validation metric score."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
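
# A minimal sketch of wiring the callbacks above into a Lightning Trainer
# (assumes a seq2seq LightningModule exposing `hparams.output_dir`; the
# values below are illustrative, not part of the original module):
if __name__ == "__main__":
    trainer = pl.Trainer(
        max_epochs=3,
        callbacks=[
            Seq2SeqLoggingCallback(),
            get_checkpoint_callback("checkpoints", "rouge2"),
            get_early_stopping_callback("rouge2", patience=2),
        ],
    )
    # trainer.fit(model)  # `model` would be the seq2seq LightningModule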
| 51 |
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular (phasor) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Apparent power is the product of the two phasors
    return voltage_rect * current_rect
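
# A small helper sketch splitting the complex result into its components
# (an addition for illustration, not part of the original module):
def describe_power(s: complex) -> str:
    # With the direct phasor product above, s.real is the active power P [W],
    # s.imag the reactive power Q [var], and abs(s) the apparent power |S| [VA].
    return f"P={s.real:.1f} W, Q={s.imag:.1f} var, |S|={abs(s):.1f} VA"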
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 39 | 0 |