| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""Near-deduplication of a code dataset using MinHash signatures and locality-sensitive hashing."""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm

NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list; snippets shorter than MIN_NUM_TOKENS are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split a code string on non-alphanumeric characters and return the set of tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
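A minimal usage sketch for the deduplication module above (not part of the original file): it assumes `datasets` and `datasketch` are installed and that rows carry the `content`, `repo_name`, and `path` columns the module indexes.

```python
# Hypothetical smoke test: two near-identical files should collapse into one cluster.
from datasets import Dataset

snippet = "def add_one(value):\n    return value + 1\n" * 5  # long enough to pass MIN_NUM_TOKENS
ds = Dataset.from_dict(
    {
        "content": [snippet, snippet + "# trailing comment\n"],
        "repo_name": ["org/repo-a", "org/repo-b"],
        "path": ["a.py", "b.py"],
    }
)
ds_filtered, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
print(len(ds_filtered), clusters)
```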
| 47 |
"""ConvNeXtV2 model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
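A short instantiation sketch for the config class above; the printed values are simply the defaults set in `__init__`.

```python
# Build a default ConvNeXtV2 config and inspect the derived stage layout.
config = ConvNextV2Config()
print(config.hidden_sizes)  # [96, 192, 384, 768]
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
```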
| 47 | 1 |
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q",
    "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G",
    "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X",
    "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check rotor selection and positions, and build the plugboard map."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Build the symmetric plugboard mapping from a string of letter pairs."""
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Create the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher (or, symmetrically, decipher) text with an Enigma-style machine."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor r1 --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1[index % len(abc)]

            # rotor r2 --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2[index % len(abc)]

            # rotor r3 --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor3.index(symbol) - rotorpos3]
            symbol = abc[rotor2.index(symbol) - rotorpos2]
            symbol = abc[rotor1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # any three distinct rotors work; the validator rejects repeated rotors
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 366 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase : List[Any] = {
"Salesforce/codegen-350M-mono": 2_0_4_8,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = CodeGenTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
if kwargs.pop('add_bos_token' , __UpperCAmelCase ):
__UpperCamelCase = kwargs.pop('name_or_path' , '' )
raise ValueError(
'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.' )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = super().decode(
token_ids=__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , **__UpperCAmelCase , )
if truncate_before_pattern is not None and len(__UpperCAmelCase ) > 0:
__UpperCamelCase = self.truncate(__UpperCAmelCase , __UpperCAmelCase )
return decoded_text
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
def find_re(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = pattern.search(__UpperCAmelCase , __UpperCAmelCase )
return m.start() if m else -1
__UpperCamelCase = [re.compile(__UpperCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCamelCase = list(re.finditer('^print' , __UpperCAmelCase , re.MULTILINE ) )
if len(__UpperCAmelCase ) > 1:
__UpperCamelCase = completion[: prints[1].start()]
__UpperCamelCase = list(re.finditer('^def' , __UpperCAmelCase , re.MULTILINE ) )
if len(__UpperCAmelCase ) > 1:
__UpperCamelCase = completion[: defs[1].start()]
__UpperCamelCase = 0
__UpperCamelCase = [
pos for pos in [find_re(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for terminal in terminals] if pos != -1
]
if len(__UpperCAmelCase ) > 0:
return completion[: min(__UpperCAmelCase )]
else:
return completion
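A hedged usage sketch for the tokenizer above: it assumes the `Salesforce/codegen-350M-mono` files are reachable, and the truncation patterns shown are illustrative choices, not a fixed API contract.

```python
# Encode a completion, then decode while truncating before a second top-level statement.
tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok("def hello():\n    print('hi')\n")["input_ids"]
text = tok.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
print(text)
```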
| 263 | 0 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
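Three quick checks of the function above; the all-negative input exercises the max/min swap that handles sign flips:

```python
assert max_product_subarray([2, 3, -2, 4]) == 6   # subarray [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0     # zero beats any negative product
assert max_product_subarray([-4, -3, -2]) == 12   # subarray [-4, -3]
```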
| 209 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 164 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : str )-> str:
_lowerCamelCase = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def SCREAMING_SNAKE_CASE_ ( snake_case : str )-> dict[str, str]:
_lowerCamelCase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_lowerCamelCase = remove_duplicates(key.upper() )
_lowerCamelCase = len(snake_case )
# First fill cipher with key characters
_lowerCamelCase = {alphabet[i]: char for i, char in enumerate(snake_case )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(snake_case ) , 26 ):
_lowerCamelCase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_lowerCamelCase = alphabet[i - offset]
_lowerCamelCase = char
return cipher_alphabet
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : dict[str, str] )-> str:
return "".join(cipher_map.get(snake_case , snake_case ) for ch in message.upper() )
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : dict[str, str] )-> str:
_lowerCamelCase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(snake_case , snake_case ) for ch in message.upper() )
def SCREAMING_SNAKE_CASE_ ( )-> None:
_lowerCamelCase = input('Enter message to encode or decode: ' ).strip()
_lowerCamelCase = input('Enter keyword: ' ).strip()
_lowerCamelCase = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
_lowerCamelCase = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
_lowerCamelCase = create_cipher_map(snake_case )
print(func(snake_case , snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
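A non-interactive example of the cipher above; the keyword "secret" is an arbitrary choice:

```python
cipher_map = create_cipher_map("secret")
ciphertext = encipher("HELLO WORLD", cipher_map)
print(ciphertext)
print(decipher(ciphertext, cipher_map))  # HELLO WORLD
```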
| 80 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> list[int]:
if num <= 0:
raise ValueError('Input must be a positive integer' )
_lowerCamelCase = [True] * (num + 1)
_lowerCamelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , snake_case ):
_lowerCamelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[int] =int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
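Two quick sanity checks for the sieve above:

```python
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(20) == [2, 3, 5, 7, 11, 13, 17, 19]
```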
| 80 | 1 |
import datasets
from sklearn.metrics import matthews_corrcoef

_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54
    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1
    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 257 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 257 | 1 |
# Imports
import numpy as np


class IndexCalculation:
    """Compute vegetation indices from red, green, blue, red-edge and NIR bands."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch to the index named by `index`, optionally updating the bands first."""
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
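A small usage sketch for the class above; the two-pixel bands are synthetic stand-ins for real per-pixel reflectance arrays:

```python
import numpy as np

red = np.array([[0.2, 0.3]])
green = np.array([[0.3, 0.3]])
blue = np.array([[0.1, 0.2]])
nir = np.array([[0.6, 0.7]])

cl = IndexCalculation(red=red, green=green, blue=blue, nir=nir)
print(cl.calculation("NDVI"))  # element-wise (nir - red) / (nir + red)
```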
| 48 |
"""BART model configuration."""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging

logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
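Instantiating the configuration above with its defaults (inside `transformers`, where the relative imports resolve); note that `num_attention_heads` resolves through `attribute_map` to `encoder_attention_heads`:

```python
config = BartConfig()
print(config.d_model, config.encoder_layers, config.num_attention_heads)  # 1024 12 16
```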
| 48 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    multinomial_term = 1
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 50 |
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least n for which the fill-count function first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
| 50 | 1 |
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperature: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperature
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
| 364 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with a given provider; defaults to CPUExecutionProvider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)

        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
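
# Small added sketch (not in the original module): looking up the NumPy dtype that
# matches an ONNX Runtime output type string via the ORT_TO_NP_TYPE table above.
def _demo_ort_dtype_lookup():
    dtype = ORT_TO_NP_TYPE["tensor(float16)"]
    return np.zeros((2, 2), dtype=dtype)  # a float16 array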
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    # Heun's method (modified Euler): predict with an Euler step, then correct
    # with the trapezoidal average of the slopes at both ends of the interval.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
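
    # Added demo (not in the original file): integrate dy/dx = y from x = 0 to 1
    # with y(0) = 1. The exact solution is e^x, so y[-1] should be close to e.
    result = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    print(f"y(1) ~= {result[-1]:.5f} (exact: {np.e:.5f})")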
def solution(limit: int = 1_000_000) -> int:
    """Returns the number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
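
# Added illustration (not in the original file): the chain-length bookkeeping in
# miniature. The Collatz chain of 13 is 13->40->20->10->5->16->8->4->2->1: 10 terms.
def collatz_chain_length(number: int) -> int:
    counter = 1
    while number != 1:
        number = number // 2 if number % 2 == 0 else 3 * number + 1
        counter += 1
    return counter


assert collatz_chain_length(13) == 10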
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
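
# Added usage sketch (not from the original file): instantiating the default
# LeViT-128S-style configuration and reading a derived attribute.
if __name__ == "__main__":
    config = LevitConfig()
    print(config.hidden_sizes)  # [128, 256, 384]
    print(config.down_ops[0])  # ['Subsample', 16, 8, 4, 2, 2]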
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
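
# Added round-trip sketch (not part of the original module); the YAML content
# below is made up for illustration.
def _demo_dataset_metadata():
    readme = "---\nlicense: mit\ntags:\n- demo\n---\n# My dataset\n"
    yaml_block, body = _split_yaml_from_readme(readme)
    metadata = DatasetMetadata.from_yaml_string(yaml_block)
    assert metadata["license"] == "mit"
    assert body.startswith("# My dataset")
    return metadata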
def hexagonal_numbers(length: int) -> list[int]:
    """Returns a list of the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in the model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Share one amax across the Q, K and V quantizers, matching a fused QKV GEMM."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to per-channel, assigning the per-tensor amax to each channel."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the quantization configuration of every weighted layer."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print a summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of mod's quantizer to value v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a pattern in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
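
# Added illustration (not part of the original module): how the regex matching in
# set_quantizer_by_name pairs patterns with module names. This uses a plain torch
# toy model, so only the name-matching logic is exercised, not any quantizer.
def _demo_name_matching():
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
    pattern = r"\d+"  # matches the numeric child names Sequential assigns ("0", "1")
    return [name for name, _ in model.named_modules() if re.search(pattern, name)]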
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCamelCase ( lowerCAmelCase__ = "isbn/0140328726" ):
'''simple docstring'''
lowercase = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
lowercase = f'{olid} is not a valid Open Library olid'
raise ValueError(lowerCAmelCase__ )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
lowercase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowercase = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
lowercase = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = ''', '''.join(lowerCAmelCase__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("\n".join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 8
@property
def A__ ( self):
lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
return tokenizer
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(A__)
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase = PriorTransformer(**A__)
return model
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase = ShapERenderer(**A__)
return model
def A__ ( self):
lowercase = self.dummy_prior
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_renderer
lowercase = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_0_2_4 ,prediction_type='''sample''' ,use_karras_sigmas=A__ ,clip_sample=A__ ,clip_sample_range=1.0 ,)
lowercase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def A__ ( self):
lowercase = '''cpu'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = pipe(**self.get_dummy_inputs(A__))
lowercase = output.images[0]
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
lowercase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def A__ ( self):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def A__ ( self):
lowercase = torch_device == '''cpu'''
lowercase = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=A__ ,relax_max_difference=A__ ,)
def A__ ( self):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = 1
lowercase = 2
lowercase = self.get_dummy_inputs(A__)
for key in inputs.keys():
if key in self.batch_params:
lowercase = batch_size * [inputs[key]]
lowercase = pipe(**A__ ,num_images_per_prompt=A__)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''')
lowercase = ShapEPipeline.from_pretrained('''openai/shap-e''')
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = torch.Generator(device=A__).manual_seed(0)
lowercase = pipe(
'''a shark''' ,generator=A__ ,guidance_scale=15.0 ,num_inference_steps=6_4 ,frame_size=6_4 ,output_type='''np''' ,).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(A__ ,A__)
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
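
# Added illustration (not in the original script): the state-dict key remapping in
# convert_xlm_checkpoint_to_pytorch on a toy dict: prediction-layer tensors keep
# their names, everything else is nested one level deeper under "transformer.".
def _demo_key_remap():
    state_dict = {"pred_layer.proj.weight": 1, "embeddings.weight": 2}
    remapped = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            remapped[k] = v
        else:
            remapped["transformer." + k] = v
    return remapped  # {'pred_layer.proj.weight': 1, 'transformer.embeddings.weight': 2}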
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = tempfile.mkdtemp()
lowercase : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
lowercase : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase : Any = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
lowercase : List[str] = os.path.join(self.tmpdirname ,snake_case )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = self.get_tokenizer()
lowercase : Dict = self.get_rust_tokenizer()
lowercase : Union[str, Any] = self.get_image_processor()
lowercase : Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
lowercase : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=snake_case )
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
lowercase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,snake_case )
self.assertIsInstance(processor_fast.tokenizer ,snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,snake_case )
self.assertIsInstance(processor_fast.image_processor ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase : int = self.get_tokenizer(cls_token="""(CLS)""" ,sep_token="""(SEP)""" )
lowercase : Dict = self.get_image_processor(do_normalize=snake_case )
lowercase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname ,cls_token="""(CLS)""" ,sep_token="""(SEP)""" ,do_normalize=snake_case )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : List[str] = self.get_tokenizer()
lowercase : Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : Optional[int] = self.prepare_image_inputs()
lowercase : Tuple = image_processor(snake_case ,return_tensors="""np""" )
lowercase : Optional[int] = processor(images=snake_case ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : Union[str, Any] = self.get_tokenizer()
lowercase : Any = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : Any = """Alexandra,T-shirt的价格是15便士。"""
lowercase : int = processor(text=snake_case )
lowercase : Dict = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.get_image_processor()
lowercase : Optional[Any] = self.get_tokenizer()
lowercase : Dict = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : List[str] = """Alexandra,T-shirt的价格是15便士。"""
lowercase : Any = self.prepare_image_inputs()
lowercase : Optional[Any] = processor(text=snake_case ,images=snake_case )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : str = self.get_tokenizer()
lowercase : Dict = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase : Any = processor.batch_decode(snake_case )
lowercase : str = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.get_image_processor()
lowercase : Dict = self.get_tokenizer()
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : str = """Alexandra,T-shirt的价格是15便士。"""
lowercase : Optional[Any] = self.prepare_image_inputs()
lowercase : Dict = processor(text=snake_case ,images=snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE ) ->list:
if len(_SCREAMING_SNAKE_CASE ) < 2:
return collection
def circle_sort_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
a__: List[str] = False
if low == high:
return swapped
a__: Tuple = low
a__: List[Any] = high
while left < right:
if collection[left] > collection[right]:
a__ , a__: Tuple = (
collection[right],
collection[left],
)
a__: str = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
a__ , a__: Optional[Any] = (
collection[right + 1],
collection[left],
)
a__: str = True
a__: Any = low + int((high - low) / 2 )
a__: Optional[Any] = circle_sort_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Dict = circle_sort_util(_SCREAMING_SNAKE_CASE , mid + 1 , _SCREAMING_SNAKE_CASE )
return swapped or left_swap or right_swap
a__: Tuple = True
while is_not_sorted is True:
a__: Any = circle_sort_util(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
return collection
if __name__ == "__main__":
lowercase__ = input('Enter numbers separated by a comma:\n').strip()
lowercase__ = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
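
# Added property check (not in the original file): circle sort agrees with the
# built-in sorted() on a few random inputs.
def _demo_circle_sort():
    import random

    for _ in range(5):
        data = [random.randint(-100, 100) for _ in range(20)]
        assert circle_sort(list(data)) == sorted(data)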
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
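
# Added illustration (not part of the original pipeline): how `strength` maps to a
# slice of the scheduler's timestep ladder, mirroring get_timesteps above.
def _demo_strength_to_t_start(num_inference_steps: int = 50, strength: float = 0.8) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start  # with the defaults: 10, i.e. the first 10 of 50 steps are skipped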
class EditDistance:
    """Minimum edit distance with both top-down (memoized) and bottom-up DP."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 32
lowercase_ = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
        lowercase_ = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if pil_image:
lowercase_ = input_image * 0.5 + 0.5
lowercase_ = input_image.clamp(0 , 1 )
lowercase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
        lowercase_ = StableUnCLIPImg2ImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        lowercase_ = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.float16 )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
    def test_stable_unclip_h_img2img( self ):
        '''simple docstring'''
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        '''simple docstring'''
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 297 | 1 |
from __future__ import annotations
import pandas as pd
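# Shortest Remaining Time First (SRTF): preemptive scheduling that always runs the
# process with the least remaining burst time among those that have arrived.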
def calculate_waitingtime( arrival_time: list[int], burst_time: list[int], no_of_processes: int ) -> list[int]:
    """Calculate per-process waiting times under preemptive SRTF scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
        # Find finish time of current process
        finish_time = increment_time + 1
        # Calculate waiting time
        waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]
        if waiting_time[short] < 0:
            waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime( burst_time: list[int], no_of_processes: int, waiting_time: list[int] ) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times( waiting_time: list[int], turn_around_time: list[int], no_of_processes: int ) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print(f"""Average turn around time = {total_turn_around_time / no_of_processes:.5f}""" )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 38 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
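# Scrapes the IMDb Top 250 chart into a {title: rating} dict, then writes it to CSV.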
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ):
__UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
__UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' )
__UpperCamelCase =soup.find_all('td' , attrs='titleColumn' )
__UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ):
__UpperCamelCase =get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file:
__UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 62 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    '''Second-order low-pass biquad (RBJ Audio EQ Cookbook coefficients).'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0] )
    return filt
def make_highpass( frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    '''Second-order high-pass biquad.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0] )
    return filt
def make_bandpass( frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    '''Second-order band-pass biquad (constant skirt gain, peak gain = Q).'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_allpass( frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    '''Second-order all-pass biquad (flat magnitude, phase shift only).'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2] )
    return filt
def make_peak( frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2 ), ) -> IIRFilter:
    '''Second-order peaking EQ biquad with gain in dB.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_lowshelf( frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2 ), ) -> IIRFilter:
    '''Second-order low-shelf biquad with gain in dB.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_highshelf( frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2 ), ) -> IIRFilter:
    '''Second-order high-shelf biquad with gain in dB.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
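# A minimal usage sketch (assumes IIRFilter exposes a per-sample `process` method,
# as in audio_filters.iir_filter; verify against that module before relying on it):
#   filt = make_lowpass(1_000, 48_000)
#   filtered = [filt.process(sample) for sample in samples]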
| 110 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
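# Stores a learned mean/std pair used to normalize ("scale") and denormalize
# ("unscale") CLIP image embeddings.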
class StableUnCLIPImageNormalizer( ModelMixin, ConfigMixin ):
    @register_to_config
    def __init__( self, embedding_dim: int = 768, ) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1, embedding_dim ) )
    def to( self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None, ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self, embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self, embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 110 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
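# Parquet loader: optionally infers features from the Arrow schema, then streams
# record batches from each file and yields them as Arrow tables.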
@dataclass
class ParquetConfig( datasets.BuilderConfig ):
'''simple docstring'''
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet( datasets.ArrowBasedBuilder ):
'''simple docstring'''
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators( self, dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, 'rb') as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
return splits
    def _cast_table( self, pa_table: pa.Table ):
        '''simple docstring'''
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
    def _generate_tables( self, files ):
        '''simple docstring'''
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
raise
| 166 |
'''Fine-tune GPT-2 with Information Gain Filtration (IGF): a secondary learner scores
training contexts by predicted information gain and filters out uninformative ones.'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
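# Pipeline: (1) collect (context, information-gain) pairs, (2) train a secondary
# learner on them, (3) fine-tune GPT-2 using only contexts the learner scores above
# a decaying threshold (see the threshold logic inside `finetune` below).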
def generate_n_pairs( context_len=32, max_steps=10, size_objective_set=100, min_len=1_026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1_026, trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
    # load pretrained model
    model = load_gpta('gpt2' ).to(device )
    print('computing perplexity on objective set' )
    orig_perp = compute_perplexity(model, objective_set, context_len ).item()
    print('perplexity on objective set:', orig_perp )
    # collect igf pairs and save to file demo.jbl
    # (argument order for these igf.igf helpers is assumed, not verified)
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ):
    """simple docstring"""
    set_seed(42 )
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained('gpt2' )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune( model, train_dataset, test_dataset, context_len=32, max_steps=1_000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
"""simple docstring"""
__lowercase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
__lowercase =RandomSampler(_lowerCAmelCase )
__lowercase =DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase )
__lowercase =max_steps // (len(_lowerCAmelCase )) + 1
__lowercase =0
__lowercase =torch.zeros((1, context_len) , dtype=torch.long , device=_lowerCAmelCase )
__lowercase , __lowercase , __lowercase =recopy_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(_lowerCAmelCase )
secondary_learner.eval()
__lowercase =[]
__lowercase =0
__lowercase =[]
__lowercase =[]
# Compute the performance of the transformer model at the beginning
__lowercase =compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
test_perps.append(_lowerCAmelCase )
print('Test perplexity, step' , _lowerCAmelCase , ':' , _lowerCAmelCase )
for epoch in range(int(_lowerCAmelCase ) ):
for step, example in enumerate(_lowerCAmelCase ):
torch.cuda.empty_cache()
__lowercase =random.randint(0 , example.size(2 ) - context_len - 1 )
__lowercase =example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__lowercase =model(_lowerCAmelCase , labels=_lowerCAmelCase )
__lowercase =True
if secondary_learner is not None:
__lowercase =secondary_learner.forward(
torch.tensor(_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_lowerCAmelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__lowercase =-1
if predicted_q < threshold:
__lowercase =False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__lowercase =outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__lowercase =0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__lowercase =compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
test_perps.append(_lowerCAmelCase )
print('Test perplexity, step' , _lowerCAmelCase , ':' , _lowerCAmelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _lowerCAmelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=_lowerCAmelCase , default=_lowerCAmelCase , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=_lowerCAmelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=_lowerCAmelCase , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=_lowerCAmelCase , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=1_000 , type=_lowerCAmelCase , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=_lowerCAmelCase , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=_lowerCAmelCase , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=_lowerCAmelCase , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=_lowerCAmelCase , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=1_026 , type=_lowerCAmelCase , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=_lowerCAmelCase , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=_lowerCAmelCase , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=_lowerCAmelCase , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=_lowerCAmelCase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
    igf_data = joblib.load('data/IGF_values.jbl' )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        igf_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path='igf_model.pt', )
# load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file='data/tokenized_stories_train_wikitext103.jbl', number=100, min_len=1_026, trim=True )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1_000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name='gpt2_finetuned.pt', )
if __name__ == "__main__":
main()
| 166 | 1 |
"""simple docstring"""
from __future__ import annotations
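# Chi-squared frequency analysis: try every Caesar shift and keep the decryption
# whose letter distribution best matches English letter frequencies.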
def decrypt_caesar_with_chi_squared( ciphertext, cipher_alphabet=None, frequencies_dict=None, case_sensitive=False, ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.08497,
            'b': 0.01492,
            'c': 0.02202,
            'd': 0.04253,
            'e': 0.11162,
            'f': 0.02228,
            'g': 0.02015,
            'h': 0.06094,
            'i': 0.07546,
            'j': 0.00153,
            'k': 0.01292,
            'l': 0.04025,
            'm': 0.02406,
            'n': 0.06749,
            'o': 0.07507,
            'p': 0.01929,
            'q': 0.00095,
            'r': 0.07587,
            's': 0.06327,
            't': 0.09356,
            'u': 0.02758,
            'v': 0.00978,
            'w': 0.02560,
            'x': 0.00150,
            'y': 0.01994,
            'z': 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key, )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
) | 12 |
"""simple docstring"""
def counting_sort( collection ):
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
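# Sorts a string by applying counting_sort to the characters' ordinal values.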
def counting_sort_string( string ):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
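# Unit tests for the summarization preprocessing helpers: block padding/truncation,
# story/summary splitting, and attention/token-type mask building.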
class SummarizationDataProcessingTest( unittest.TestCase ):
    """Tests for the summarization preprocessing utilities."""
    def setUp( self ):
        self.block_size = 10
    def test_fit_to_block_sequence_too_small( self ):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output )
    def test_fit_to_block_sequence_fit_exactly( self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output )
    def test_fit_to_block_sequence_too_big( self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output )
    def test_process_story_no_highlights( self ):
        raw_story = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        _, summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines, [] )
    def test_process_empty_story( self ):
        raw_story = ''''''
        story_lines, summary_lines = process_story(raw_story )
        self.assertEqual(story_lines, [] )
        self.assertEqual(summary_lines, [] )
    def test_process_story_with_missing_period( self ):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story )
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines )
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines )
    def test_build_mask_no_padding( self ):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence, 0 ).numpy(), expected.numpy() )
    def test_build_mask( self ):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence, 23 ).numpy(), expected.numpy() )
    def test_build_mask_with_padding_equal_to_one( self ):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence, 1 ).numpy(), expected.numpy() )
    def test_compute_token_type_ids( self ):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch, separator )
        np.testing.assert_array_equal(result, expected )
| 30 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
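# Each test launches an accelerate test script under torchrun across the visible GPUs;
# the __main__ block below exercises pad_across_processes on each rank.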
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
    @require_multi_gpu
    def test_multi_gpu( self ):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
    @require_multi_gpu
    def test_multi_gpu_ops( self ):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(f'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
    @require_multi_gpu
    def test_pad_across_processes( self ):
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
    @require_multi_gpu
    def test_distributed_data_loop( self ):
        print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='''0,1''' ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 45 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
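# Official SQuAD 2.0 evaluation: exact-match/F1 scoring with a no-answer probability
# threshold, plus optional precision-recall analysis and histograms.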
ARTICLES_REGEX = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
OPTS = None
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file." )
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions." )
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help="Predict \"\" if no-answer probability exceeds this (default = 1.0).", )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose", "-v", action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans( dataset ):
    """simple docstring"""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"] )
    return qid_to_has_ans
def normalize_answer( s ):
    """simple docstring"""
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(" ", text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens( s ):
    """simple docstring"""
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact( a_gold, a_pred ):
    """simple docstring"""
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa( a_gold, a_pred ):
    """simple docstring"""
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores( dataset, preds ):
    """simple docstring"""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""" )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a, a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold( scores, na_probs, qid_to_has_ans, na_prob_thresh ):
    """simple docstring"""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict( exact_scores, fa_scores, qid_list=None ):
    """simple docstring"""
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values() ) / total),
                ("f1", 100.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ] )
def merge_eval( main_eval, new_eval, prefix ):
    """simple docstring"""
    for k in new_eval:
        main_eval[f"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve( precisions, recalls, out_image, title ):
    """simple docstring"""
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post" )
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval( scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None ):
    """simple docstring"""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis( main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir ):
    """simple docstring"""
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png" ), title="Precision-Recall curve for Exact Match score", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png" ), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png" ), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact" )
    merge_eval(main_eval, pr_fa, "pr_f1" )
    merge_eval(main_eval, pr_oracle, "pr_oracle" )
def histogram_na_prob( na_probs, qid_list, image_dir, name ):
    """simple docstring"""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(f"""Histogram of no-answer probability: {name}""" )
    plt.savefig(os.path.join(image_dir, f"""na_prob_hist_{name}.png""" ) )
    plt.clf()
def find_best_thresh( preds, scores, na_probs, qid_to_has_ans ):
    """simple docstring"""
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh( main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans ):
    """simple docstring"""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans )
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans )
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """simple docstring"""
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
        dataset = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds )
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh, fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids )
        merge_eval(out_eval, has_ans_eval, "HasAns" )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids )
        merge_eval(out_eval, no_ans_eval, "NoAns" )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir )
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns" )
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns" )
    if OPTS.out_file:
        with open(OPTS.out_file, "w" ) as f:
            json.dump(out_eval, f )
    else:
        print(json.dumps(out_eval, indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 121 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return TrainCommand(lowerCamelCase__ )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@staticmethod
def snake_case ( SCREAMING_SNAKE_CASE : ArgumentParser ):
lowercase__ : Optional[int] = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=SCREAMING_SNAKE_CASE , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=SCREAMING_SNAKE_CASE , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=SCREAMING_SNAKE_CASE , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=SCREAMING_SNAKE_CASE , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=SCREAMING_SNAKE_CASE , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=SCREAMING_SNAKE_CASE , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=SCREAMING_SNAKE_CASE , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=SCREAMING_SNAKE_CASE , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=SCREAMING_SNAKE_CASE , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=3E-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=SCREAMING_SNAKE_CASE , default=1E-0_8 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE )
def __init__( self : int , SCREAMING_SNAKE_CASE : Namespace ):
lowercase__ : int = logging.get_logger("transformers-cli/training" )
lowercase__ : List[Any] = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = args.output
lowercase__ : Union[str, Any] = args.column_label
lowercase__ : Optional[int] = args.column_text
lowercase__ : Optional[int] = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
lowercase__ : int = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
lowercase__ : List[str] = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowercase__ : Union[str, Any] = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
lowercase__ : Optional[int] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowercase__ : Dict = args.validation_split
lowercase__ : List[str] = args.train_batch_size
lowercase__ : Any = args.valid_batch_size
lowercase__ : Optional[int] = args.learning_rate
lowercase__ : int = args.adam_epsilon
def snake_case ( self : Dict ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def snake_case ( self : Union[str, Any] ):
raise NotImplementedError
def snake_case ( self : Union[str, Any] ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 121 | 1 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : list, _lowerCAmelCase : list, _lowerCAmelCase : int ):
"""simple docstring"""
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
raise ValueError('''The length of profit and weight must be same.''' )
if max_weight <= 0:
raise ValueError('''max_weight must greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
_a = [p / w for p, w in zip(_lowerCAmelCase, _lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
_a = sorted(_lowerCAmelCase )
# declaring useful variables
_a = len(_lowerCAmelCase )
_a = 0
_a = 0
_a = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
_a = sorted_profit_by_weight[length - i - 1]
_a = profit_by_weight.index(_lowerCAmelCase )
_a = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
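# A quick sanity check (assumed values): with profits [60, 100, 120] and
# weights [10, 20, 30] at max_weight 50, the fractional optimum takes the two
# best-ratio items whole and 2/3 of the third: 60 + 100 + 80 = 240.
#   calc_profit([60, 100, 120], [10, 20, 30], 50)  # -> 240.0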
if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
| 320 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
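# A minimal sketch of the behaviour exercised below (the variable name is the
# real TRANSFORMERS_OFFLINE flag; the model id is a placeholder):
#   import os
#   os.environ["TRANSFORMERS_OFFLINE"] = "1"  # must be set before importing transformers
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained("some/cached-model")  # resolved from the local cache only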
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed: connection errors fall back to the local cache
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 320 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
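# A hypothetical downstream usage of the re-exported pipeline (the checkpoint
# id and call arguments are illustrative, not confirmed by this module):
#   from diffusers import ShapEPipeline
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#   images = pipe("a donut", num_inference_steps=64).images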
| 310 |
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super(__a , self ).__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
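# Hypothetical usage sketch (the tokenized `W_query` / `W_supports` dicts are
# assumed to follow the contract described in forward() above):
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)  # per-token start/end scores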
| 310 | 1 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE: the original flag names were obfuscated; these are assumed to be the
    # standard ModelTesterMixin switches set to False/True here.
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
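# A compact inference sketch mirroring the integration test above (checkpoint
# id is the real hub model; the input ids are arbitrary):
#   model = MPNetModel.from_pretrained("microsoft/mpnet-base")
#   last_hidden = model(torch.tensor([[0, 345, 232, 2]]))[0]  # shape (1, 4, 768)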
| 183 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Training arguments for seq2seq fine-tuning, extending `TrainingArguments`
    with label smoothing, dropout overrides and scheduler selection.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 296 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE: the original flag name was obfuscated; assumed to be the standard
    # CPU-offload switch disabled for this pipeline.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
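# A short end-user sketch for the pipeline under test (checkpoint id as used in
# the integration tests above; step count is arbitrary):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50).images[0]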
| 353 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
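# Hypothetical usage sketch (the checkpoint id is the public CLIPSeg model on
# the Hub; `image` is assumed to be a PIL.Image):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")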
| 22 | 0 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _snake_case ( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase : int = 16 , UpperCAmelCase : int = 88 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 32 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = "geglu" , UpperCAmelCase : Optional[int] = None , ):
super().__init__()
__lowerCamelCase : Optional[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCAmelCase , attention_head_dim=UpperCAmelCase , in_channels=UpperCAmelCase , num_layers=UpperCAmelCase , dropout=UpperCAmelCase , norm_num_groups=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , attention_bias=UpperCAmelCase , sample_size=UpperCAmelCase , num_vector_embeds=UpperCAmelCase , activation_fn=UpperCAmelCase , num_embeds_ada_norm=UpperCAmelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__lowerCamelCase : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__lowerCamelCase : Optional[Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__lowerCamelCase : List[str] = [1, 0]
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Any=None , UpperCAmelCase : bool = True , ):
__lowerCamelCase : Any = hidden_states
__lowerCamelCase : Any = []
__lowerCamelCase : int = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__lowerCamelCase : List[str] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__lowerCamelCase : Any = self.transformer_index_for_condition[i]
__lowerCamelCase : Optional[Any] = self.transformers[transformer_index](
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__lowerCamelCase : Union[str, Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCAmelCase ) | 135 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
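# A small usage sketch of the helper under test (repo id and filename are
# illustrative; the return value is a local path inside the HF cache):
#   from transformers.utils import cached_file
#   resolved = cached_file("bert-base-cased", "config.json")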
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 135 | 1 |
"""
Pure Python implementations of a fixed priority queue and an element priority
queue using Python lists.
"""


class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """
    Three-level priority queue: priority 0 is served first, then 1, then 2.
    Elements with the same priority are served FIFO.
    """

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The element with the smallest value has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
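# A tiny sanity check of the dequeue order (assumed values): a lower priority
# number dequeues first, FIFO within a priority level.
#   fpq = FixedPriorityQueue()
#   fpq.enqueue(2, 5); fpq.enqueue(0, 1); fpq.enqueue(1, 3)
#   [fpq.dequeue(), fpq.dequeue(), fpq.dequeue()]  # -> [1, 3, 5]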
| 364 |
"""Subset-sum check via dynamic programming, O(n * required_sum) time and space."""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to exactly `required_sum`."""
    arr_len = len(arr)
    # subset[i][j] says whether a sum of j can be formed from the first i elements
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
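# Quick worked example (assumed values): 9 = 4 + 5 can be formed from
# [3, 34, 4, 12, 5, 2], so the call returns True.
#   is_sum_subset([3, 34, 4, 12, 5, 2], 9)  # -> True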
| 92 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
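# A short end-user sketch of the pipeline under test (checkpoint id as in the
# tests above; `init_image` is assumed to be a PIL.Image):
#   pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
#       "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
#   ).to("cuda")
#   image = pipe(init_image, "anime turtle").images[0]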
| 21 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NOTE: `support_list` refers to the module-level variable built in the UI code below.
    return (answer, support_list)
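# The UI code below wires these pieces together: make_support() retrieves and
# formats support passages, and answer_question() conditions generation on
# them. A minimal non-UI sketch (the question string is made up):
#   doc, support = make_support("Why is the sky blue?", source="wiki40b", method="dense")
#   answer, _ = answer_question(doc, sas_model, sas_tokenizer)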
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : Any = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : Optional[int] = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : List[str] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Optional[int] = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : int = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
SCREAMING_SNAKE_CASE : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
SCREAMING_SNAKE_CASE : int = "wiki40b"
SCREAMING_SNAKE_CASE : int = "dense"
SCREAMING_SNAKE_CASE : str = "beam"
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : List[str] = 64
SCREAMING_SNAKE_CASE : Union[str, Any] = 256
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Generation options")
if generate_options:
SCREAMING_SNAKE_CASE : Any = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE : int = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE : int = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Any = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : str = None
# start main text
SCREAMING_SNAKE_CASE : List[str] = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
SCREAMING_SNAKE_CASE : str = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE : List[str] = st.text_input("Enter your question here:", "")
else:
SCREAMING_SNAKE_CASE : Optional[int] = question_s
if st.button("Show me!"):
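    # action indices map onto action_list above:
    # 0 = answer, 1 = retrieved passages only, 2 = nearest ELI5 Q&A, 3 = everything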
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = make_support(question, source=wiki_source, method="dense", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method="sparse", n_results=10)
SCREAMING_SNAKE_CASE : Tuple = []
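        # interleave dense and sparse hits, dropping duplicates, then keep the top 10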
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE : Optional[Any] = support_list[:10]
SCREAMING_SNAKE_CASE : int = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE : Optional[Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
SCREAMING_SNAKE_CASE : List[Any] = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE : Union[str, Any] = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE : Any = sec_titles.split(" & ")
SCREAMING_SNAKE_CASE : List[Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE : str = find_nearest_training(question)
SCREAMING_SNAKE_CASE : Any = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
SCREAMING_SNAKE_CASE : str = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
SCREAMING_SNAKE_CASE : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 21 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class A ( Pipeline ):
"""simple docstring"""
    def get_masked_index(self , input_ids: GenericTensor ) -> np.ndarray:
'''simple docstring'''
if self.framework == "tf":
A__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id,as_tuple=_a )
else:
raise ValueError('Unsupported framework' )
return masked_index
    def _ensure_exactly_one_mask_token(self , input_ids: GenericTensor ) -> None:
'''simple docstring'''
A__ = self.get_masked_index(_a )
A__ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask',self.model.base_model_prefix,F'No mask_token ({self.tokenizer.mask_token}) found on the input',)
    def ensure_exactly_one_mask_token(self , model_inputs: GenericTensor ):
'''simple docstring'''
if isinstance(_a,_a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
    def preprocess(self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
'''simple docstring'''
if return_tensors is None:
A__ = self.framework
A__ = self.tokenizer(_a,return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
    def _forward(self , model_inputs ):
'''simple docstring'''
A__ = self.model(**_a )
A__ = model_inputs['input_ids']
return model_outputs
    def postprocess(self , model_outputs , top_k=5 , target_ids=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
A__ = target_ids.shape[0]
A__ = model_outputs['input_ids'][0]
A__ = model_outputs['logits']
if self.framework == "tf":
A__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A__ = outputs.numpy()
A__ = outputs[0, masked_index, :]
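            # stable_softmax behaves like tf.nn.softmax; transformers ships it to work around an XLA numerical bug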
A__ = stable_softmax(_a,axis=-1 )
if target_ids is not None:
A__ = tf.gather_nd(tf.squeeze(_a,0 ),target_ids.reshape(-1,1 ) )
A__ = tf.expand_dims(_a,0 )
A__ = tf.math.top_k(_a,k=_a )
A__ , A__ = topk.values.numpy(), topk.indices.numpy()
else:
A__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id,as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A__ = outputs[0, masked_index, :]
A__ = logits.softmax(dim=-1 )
if target_ids is not None:
A__ = probs[..., target_ids]
A__ , A__ = probs.topk(_a )
A__ = []
A__ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist(),predictions.tolist() ) ):
A__ = []
for v, p in zip(_values,_predictions ):
# Copy is important since we're going to modify this array in place
A__ = input_ids.numpy().copy()
if target_ids is not None:
A__ = target_ids[p].tolist()
A__ = p
# Filter padding out:
A__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A__ = self.tokenizer.decode(_a,skip_special_tokens=_a )
A__ = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
    def get_target_ids(self , targets , top_k=None ):
'''simple docstring'''
if isinstance(_a,_a ):
A__ = [targets]
try:
A__ = self.tokenizer.get_vocab()
except Exception:
A__ = {}
A__ = []
for target in targets:
            A__ = vocab.get(_a,None )
if id_ is None:
A__ = self.tokenizer(
_a,add_special_tokens=_a,return_attention_mask=_a,return_token_type_ids=_a,max_length=1,truncation=_a,)['input_ids']
if len(_a ) == 0:
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
'We cannot replace it with anything meaningful, ignoring it' )
continue
A__ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
A__ = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('At least one target must be provided when passed.' )
A__ = np.array(_a )
return target_ids
    def _sanitize_parameters(self , top_k=None , targets=None ):
'''simple docstring'''
A__ = {}
if targets is not None:
A__ = self.get_target_ids(_a,_a )
A__ = target_ids
if top_k is not None:
A__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask',self.model.base_model_prefix,'The tokenizer does not define a `mask_token`.' )
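        # the two empty dicts are the preprocess and forward kwargs; only postprocessing takes options here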
return {}, {}, postprocess_params
    def __call__(self , inputs , *args , **kwargs ):
'''simple docstring'''
A__ = super().__call__(_a,**_a )
if isinstance(_a,_a ) and len(_a ) == 1:
return outputs[0]
return outputs
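# A hedged usage sketch of the public API this pipeline backs (the checkpoint
# name is illustrative; any fill-mask checkpoint works):
#     from transformers import pipeline
#     unmasker = pipeline('fill-mask', model='distilroberta-base')
#     unmasker('Paris is the <mask> of France.', top_k=2)
# Each returned dict carries 'score', 'token', 'token_str' and 'sequence',
# matching the postprocess step above.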
| 363 |
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of data into (less, equal, greater) lists around pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """Return the index-th smallest element of items, or None when index is out of range."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
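if __name__ == "__main__":
    # A minimal usage sketch: sorted, the list below reads
    # [2, 4, 5, 7, 32, 54, 899], so index 3 should print 7 (values illustrative).
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 3))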
| 282 | 0 |
'''simple docstring'''
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Finds the rank of a matrix by Gaussian elimination (matrix is modified in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            # (note: rebinding the loop variable has no effect on range())
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
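    # A minimal sketch (values illustrative): the two rows below are linearly
    # independent, so the expected rank is 2.
    print(rank_of_matrix([[1.0, 2.0], [3.0, 4.0]]))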
| 158 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
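# Turns leftover CLI flags into kwargs, e.g. ["--num_proc", "8"] -> {"num_proc": "8"}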
def parse_unknown_args(unknown_args):
    return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
| 158 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info(self ) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
    def _compute(self , predictions , references ):
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
| 331 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self ) -> datasets.DatasetInfo:
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , "rb" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table(self , pa_table: pa.Table ) -> pa.Table:
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables(self , files ):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , "rb" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'{file_idx}_{batch_idx}', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                    raise
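# A hedged usage sketch via the public datasets API (the file path is
# illustrative):
#     from datasets import load_dataset
#     ds = load_dataset('parquet', data_files='data/train.parquet')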
| 331 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 207 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
snake_case : List[Any] = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=16 , n_train=10 , n_valid=2 ):
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train )
    valid_dataset = get_dataset(n_valid )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module ):
    def __init__(self ):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward(self , x ):
        return x * self.a + self.b
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(total_limit=1 ,project_dir=__snake_case ,automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
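            # with total_limit=1, the second save evicts the first checkpoint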
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def lowerCamelCase__( self :List[Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
# Train baseline
a__ = Accelerator()
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
a__ = os.path.join(__snake_case ,'initial' )
accelerator.save_state(__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
a__ = train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = Accelerator()
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
accelerator.load_state(__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
a__ = train(2 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save everything
a__ = os.path.join(__snake_case ,'checkpoint' )
accelerator.save_state(__snake_case )
# Load everything back in and make sure all states work
accelerator.load_state(__snake_case )
test_rands += train(1 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
def lowerCamelCase__( self :str ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
a__ = train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=__snake_case )
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
a__ = train(2 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_1' ) )
test_rands += train(1 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
a__ = torch.tensor([1, 2, 3] )
a__ = torch.tensor([2, 3, 4] )
a__ = DummyModel()
a__ = torch.optim.Adam(net.parameters() )
a__ = Accelerator()
with self.assertRaises(__snake_case ) as ve:
accelerator.register_for_checkpointing(__snake_case ,__snake_case ,__snake_case ,__snake_case )
a__ = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def lowerCamelCase__( self :List[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ = torch.optim.lr_scheduler.StepLR(__snake_case ,step_size=1 ,gamma=0.99 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
a__ = scheduler.state_dict()
train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
self.assertNotEqual(__snake_case ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) )
self.assertEqual(__snake_case ,scheduler.state_dict() )
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case ,total_limit=2 )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ = accelerator.prepare(__snake_case )
            # Save 11 states; with total_limit=2 only the last two checkpoints should remain:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_10' ) ) )
@require_cuda
def lowerCamelCase__( self :Dict ) -> str:
a__ = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(__snake_case ,env=os.environ.copy() )
if __name__ == "__main__":
snake_case : Tuple = '''/tmp/accelerate/state_checkpointing'''
snake_case : str = DummyModel()
snake_case : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
snake_case : Union[str, Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
snake_case , snake_case : str = dummy_dataloaders()
snake_case : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
snake_case : Dict = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
snake_case , snake_case , snake_case , snake_case , snake_case : List[str] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
snake_case , snake_case : Any = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
snake_case : Any = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
snake_case : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
snake_case : int = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
snake_case : Optional[int] = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 240 | 0 |
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
    'A': 'N',
    'N': 'A',
    'B': 'O',
    'O': 'B',
    'C': 'P',
    'P': 'C',
    'D': 'Q',
    'Q': 'D',
    'E': 'R',
    'R': 'E',
    'F': 'S',
    'S': 'F',
    'G': 'T',
    'T': 'G',
    'H': 'U',
    'U': 'H',
    'I': 'V',
    'V': 'I',
    'J': 'W',
    'W': 'J',
    'K': 'X',
    'X': 'K',
    'L': 'Y',
    'Y': 'L',
    'M': 'Z',
    'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(' ', '')
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary, pairing neighbours both ways:
    # e.g. 'POLAND' maps P<->O, L<->A, N<->D
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = '',
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # positions 1..26 become indices 0..25
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #            'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return ''.join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    # three distinct rotors are required; the default trio stands in here,
    # since the selection used in the original copy was lost
    rotor_sel = (rotor1, rotor2, rotor3)
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print('Encrypted message:', en)
    print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 209 |
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
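# The three checks above assume a minimal singly linked list node exposing
# `val` and `next`; a hedged sketch for experimentation (names illustrative):
class ListNode:
    def __init__(self, val=0, next_node=None):
        self.val = val
        self.next = next_node
if __name__ == "__main__":
    # 1 -> 2 -> 1 reads the same in both directions
    print(is_palindrome_dict(ListNode(1, ListNode(2, ListNode(1)))))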
| 209 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase (TestCase ):
    '''simple docstring'''
    def _no_encoding_on_file_open(self , filepath ):
        with open(filepath , encoding='utf-8' ) as input_file:
            # heuristic: flag open(...) calls that pass neither an encoding nor a binary/write mode
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements(self , filepath ):
        with open(filepath , encoding='utf-8' ) as input_file:
            # the leading alternatives swallow print( occurrences inside comments, strings and docstrings
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self ):
        dataset_paths = Path('./datasets' )
        dataset_files = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )
    def test_no_print_statements(self ):
        dataset_paths = Path('./datasets' )
        dataset_files = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 29 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Pure implementation of the cocktail shaker sort algorithm in Python."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class UpperCAmelCase_ ( BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class UpperCAmelCase_ ( BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 369 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self , generator , _ ):
_UpperCamelCase = generator('''Something there''' )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ANY(__UpperCamelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
_UpperCamelCase = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
] , )
_UpperCamelCase = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
] , )
with self.assertRaises(__UpperCamelCase ):
generator(4 )
@require_torch
    def test_small_model_pt(self ):
_UpperCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
_UpperCamelCase = generator('''Something there''' , do_sample=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ''''''}] )
_UpperCamelCase = 3
_UpperCamelCase = generator(
'''Something there''' , num_return_sequences=__UpperCamelCase , num_beams=__UpperCamelCase , )
_UpperCamelCase = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = generator('''This is a test''' , do_sample=__UpperCamelCase , num_return_sequences=2 , return_tensors=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
_UpperCamelCase = generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCamelCase , )
self.assertEqual(
__UpperCamelCase , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
    def test_small_model_tf(self ):
_UpperCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
_UpperCamelCase = generator('''Something there''' , do_sample=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ''''''}] )
| 54 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
| 81 |
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
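    # Sanity-check sketch (an addition, assuming qiskit and the Aer simulator
    # are installed): for a fixed seed the sifted key is deterministic, and it
    # is always padded or truncated to exactly `key_len` bits.
    sample_key = bb84(key_len=16, seed=42)
    assert len(sample_key) == 16 and set(sample_key) <= {"0", "1"}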
| 44 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
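# Usage sketch (an addition; the repo path below is an assumption): with a
# hubconf.py like this at a repository root, torch.hub resolves the entry
# points above by name, e.g.:
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mlm = torch.hub.load("huggingface/transformers", "modelForMaskedLM", "bert-base-uncased")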
| 367 |
from manim import *
class a(Scene):
    def construct(self):
snake_case__ : Optional[Any] = Rectangle(height=0.5 ,width=0.5 )
snake_case__ : Optional[int] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
snake_case__ : Optional[Any] = Rectangle(height=0.25 ,width=0.25 )
snake_case__ : Tuple = [mem.copy() for i in range(6 )]
snake_case__ : Optional[int] = [mem.copy() for i in range(6 )]
snake_case__ : List[str] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : List[Any] = VGroup(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : List[Any] = Text('''CPU''' ,font_size=2_4 )
snake_case__ : Dict = Group(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0.5 ,aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
snake_case__ : Union[str, Any] = [mem.copy() for i in range(4 )]
snake_case__ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : int = Text('''GPU''' ,font_size=2_4 )
snake_case__ : Any = Group(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0.5 ,aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
snake_case__ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case__ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Optional[Any] = Text('''Model''' ,font_size=2_4 )
snake_case__ : Dict = Group(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0.5 ,aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
snake_case__ : List[str] = []
snake_case__ : int = []
for i, rect in enumerate(__lowercase ):
snake_case__ : Dict = fill.copy().set_fill(__lowercase ,opacity=0.8 )
target.move_to(__lowercase )
model_arr.append(__lowercase )
snake_case__ : Dict = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowercase ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase ,*__lowercase )
snake_case__ : Tuple = [meta_mem.copy() for i in range(6 )]
snake_case__ : Optional[int] = [meta_mem.copy() for i in range(6 )]
snake_case__ : str = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Tuple = VGroup(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Dict = Text('''Disk''' ,font_size=2_4 )
snake_case__ : Optional[Any] = Group(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0.5 ,aligned_edge=__lowercase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowercase ,__lowercase )
snake_case__ : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case__ : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=1_8 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase ,__lowercase )
snake_case__ : Any = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=1_8 ,)
blue_text.next_to(__lowercase ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__lowercase )
snake_case__ : List[str] = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=2_4 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ) )
snake_case__ : Optional[Any] = Square(0.3 )
input.set_fill(__lowercase ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__lowercase ,buff=0.5 )
self.play(Write(__lowercase ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__lowercase ,buff=0.02 )
self.play(MoveToTarget(__lowercase ) )
self.play(FadeOut(__lowercase ) )
snake_case__ : Optional[Any] = Arrow(start=__lowercase ,end=__lowercase ,color=__lowercase ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__lowercase ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
snake_case__ : Dict = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=2_4 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ,run_time=3 ) )
snake_case__ : Tuple = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(__lowercase ) ,Circumscribe(model_arr[0] ,color=__lowercase ,**__lowercase ) ,Circumscribe(model_cpu_arr[0] ,color=__lowercase ,**__lowercase ) ,Circumscribe(gpu_rect[0] ,color=__lowercase ,**__lowercase ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
snake_case__ : int = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__lowercase ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
snake_case__ : Tuple = AnimationGroup(
FadeOut(__lowercase ,run_time=0.5 ) ,MoveToTarget(__lowercase ,run_time=0.5 ) ,FadeIn(__lowercase ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__lowercase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
snake_case__ : str = 0.7
self.play(
Circumscribe(model_arr[i] ,**__lowercase ) ,Circumscribe(cpu_left_col_base[i] ,**__lowercase ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__lowercase ,**__lowercase ) ,Circumscribe(gpu_rect[0] ,color=__lowercase ,**__lowercase ) ,Circumscribe(model_arr[i + 1] ,color=__lowercase ,**__lowercase ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__lowercase ,**__lowercase ) ,Circumscribe(cpu_left_col_base[-1] ,color=__lowercase ,**__lowercase ) ,Circumscribe(gpu_rect[0] ,color=__lowercase ,**__lowercase ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
snake_case__ : List[str] = a_c
snake_case__ : Optional[int] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__lowercase ) ,FadeOut(__lowercase ,run_time=0.5 ) ,)
snake_case__ : Optional[int] = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ,run_time=3 ) ,MoveToTarget(__lowercase ) )
self.wait()
| 44 | 0 |
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
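    # Cross-check sketch (an addition): `number &= number - 1` clears the lowest
    # set bit, so the loop runs once per 1-bit; Python's own bin() count must agree.
    for _n in (0, 1, 25, 37, 2**20 - 1):
        assert get_set_bits_count_using_brian_kernighans_algorithm(_n) == bin(_n).count("1")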
| 285 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 283 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
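    # Note (an addition): the swap exchanges the `data` payloads, not the nodes
    # themselves, so it is two O(n) searches plus an O(1) swap and every `next`
    # pointer is untouched. Swapping a value with itself or with a missing
    # value is a no-op:
    ll.swap_nodes(2, 2)
    ll.swap_nodes(1, 99)  # 99 is not in the list; nothing changes
    ll.print_list()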
| 365 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
| 203 | 0 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
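    # Convergence sketch (an addition): each pass halves the bracket, so
    # reaching the 0.01 tolerance from an initial bracket of width w takes
    # about log2(w / 0.01) iterations, roughly 10 for the (-2, 5) call above.
    import math

    print(math.ceil(math.log2(7 / 0.01)))  # expected iteration count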
| 111 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1_024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
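    # Note (an addition): each parameterized row above is (seed, timestep,
    # expected_slice), where the slice is the flattened [-1, -2:, -2:, :2]
    # corner of the UNet output; atol=1e-2 absorbs the expected divergence
    # between the torch float16 and flax bfloat16 paths.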
| 111 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
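# Usage sketch (an addition; a hypothetical test module built on the helpers above):
#
#   class MyAccelerateTest(AccelerateTestCase):
#       @require_cuda
#       def test_gpu_path(self):
#           ...
#
#   execute_subprocess_async(["python", "-c", "print('ok')"], env=os.environ.copy())
#
# The AccelerateTestCase base resets the AcceleratorState singleton between
# tests, so decorated tests never see state leaked by a previous one.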
| 355 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
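    # Example invocation sketch (an addition; the script filename and paths
    # are placeholders):
    #
    #   python convert_glpn_to_pytorch.py \
    #       --checkpoint_path ./glpn_kitti.pth \
    #       --pytorch_dump_folder_path ./glpn-kitti \
    #       --model_name glpn-kitti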
| 84 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
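# With the `_LazyModule` indirection above, importing this package stays cheap:
# the torch/tf/flax submodules are only imported on first attribute access.
# A minimal illustration of the pattern (an addition, not part of this file):
#
#   import importlib
#   electra = importlib.import_module("transformers.models.electra")
#   type(electra)           # the lazy proxy module, not a plain module
#   electra.ElectraModel    # triggers the real import of modeling_electra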
| 202 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
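    # Note (an addition): the remote variant proxies the same tool through an
    # inference endpoint, so the four tests above assert a single contract:
    # local and remote execution must return the identical answer string.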
| 202 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
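# Usage sketch (an addition; the model id and pipeline name are assumptions):
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "google/ddpm-ema-celebahq-256", custom_pipeline="ddim_noise_comparative_analysis"
#   )
#   out, t = pipe(init_image, strength=0.6, return_dict=False)
#
# `strength` controls how far along the noise schedule the input image is
# pushed before being denoised back, which is the quantity this pipeline
# is built to compare.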
| 252 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
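# Illustrative launch command (flag names follow the dataclass fields above; the dataset
# name and output path are placeholders):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75 --norm_pix_loss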
| 252 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
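# Minimal sketch of the `arg_to_scheduler` mapping defined above (the optimizer and step
# counts here are placeholders): each `--lr_scheduler` value resolves to a transformers
# schedule factory, e.g.
#   scheduler = arg_to_scheduler["linear"](optimizer, num_warmup_steps=500, num_training_steps=10_000)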
| 30 |
"""simple docstring"""
from manim import *
class Stage5(Scene):
    def construct(self):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
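# Illustrative: a manim scene like this one is rendered from the command line, e.g.
#   manim -pql stage.py Stage5
# (the file name is a placeholder; -pql previews a low-quality render).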
| 315 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
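# Quick sanity check (illustrative): gcd(48, 18) == 6, since 48 % 18 == 12,
# 18 % 12 == 6, and 12 % 6 == 0; both implementations agree:
#   assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6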
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
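# Illustrative usage (checkpoint name taken from PRETRAINED_VOCAB_FILES_MAP above):
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   tokenizer.tokenize("Hello world!")  # SentencePiece pieces such as ['▁Hello', '▁world', '!']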
| 344 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
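# Illustrative invocation: these tests only run when the TEST_SAGEMAKER environment
# variable is set (see the skipif marker above), e.g.
#   TEST_SAGEMAKER=True python -m pytest -s -v <this test file>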
| 140 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
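# Example invocation (script name and output path are placeholders; the `_old` checkpoint
# naming follows the comment at the top of this script):
#   python convert_prophetnet_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#       --pytorch_dump_folder_path ./prophetnet-converted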
| 140 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
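        # e.g. with the defaults above: embed_dim=64, depths=[3, 4, 6, 5] gives 64 * 2**3 == 512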
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
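# Illustrative note on the lazy-import structure below: registering a _LazyModule in
# sys.modules defers the torch-dependent modeling imports until an attribute such as
# `EncodecModel` is first accessed.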
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
def lowerCAmelCase_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase )
A_ : Optional[Any] = {}
A_ : Dict = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : str = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : str = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' )
A_ : List[Any] = tmp_dict
with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key], subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding="max_length", max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
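# Illustrative usage (checkpoint and voice preset names are placeholders):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")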
| 140 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of the Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
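# Background note: for a Hermitian matrix M, the Rayleigh quotient R(M, v) = (v* M v) / (v* v)
# is always real and lies between the smallest and largest eigenvalues of M.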
| 140 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
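# Illustrative: with the tester defaults above (embed_dim=16, depths=[1, 2, 1]),
# the expected final hidden dimension checked in create_and_check_model is 16 * 2**2 == 64.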
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
def __lowerCAmelCase ( self ) ->List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Any:
return
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
@unittest.skip('''Swin does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def __lowerCAmelCase ( self ) ->int:
pass
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Dict = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = 3
SCREAMING_SNAKE_CASE : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __lowerCAmelCase ( self ) ->int:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0  # NaN is the only value that is not equal to itself
return t
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCamelCase ) , set_nan_tensor_to_zero(_lowerCamelCase ) , atol=1e-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}. Dict has"""
F""" `nan`: {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}."""
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {'''output_hidden_states''': True} )
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {'''output_hidden_states''': True} )
@require_torch
class a_ ( unittest.TestCase , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : str = MaskFormerSwinConfig
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = MaskFormerSwinModelTester(self )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = backbone_class(_lowerCamelCase )
backbone.to(_lowerCamelCase )
backbone.eval()
SCREAMING_SNAKE_CASE : Tuple = backbone(**_lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
SCREAMING_SNAKE_CASE : Optional[Any] = backbone(**_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
h_batch_size , _ , h_n_channels = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
SCREAMING_SNAKE_CASE : Tuple = backbone(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
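# Illustrative sketch (not part of the test suite above): the sequence length
# and hidden size asserted in create_and_check_model follow Swin's
# patchify-then-downsample arithmetic. The hyperparameter values below are
# assumed examples, not necessarily the tester's defaults.
def expected_swin_output_shape(image_size: int, patch_size: int, embed_dim: int, num_stages: int):
    # patchify yields (image_size // patch_size) ** 2 tokens; each of the
    # num_stages - 1 patch-merging steps quarters the token count and
    # doubles the channel dimension
    seq_len = ((image_size // patch_size) ** 2) // (4 ** (num_stages - 1))
    dim = int(embed_dim * 2 ** (num_stages - 1))
    return seq_len, dim

assert expected_swin_output_shape(32, 2, 16, 3) == (16, 64)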
| 19 |
import datasets
from .evaluate import evaluate
a__ : Dict = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
a__ : List[str] = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
a__ : List[Any] = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly matches the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
SCREAMING_SNAKE_CASE : int = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE : Dict = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
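# Minimal sketch (assumed toy inputs) of the reshaping done in _compute above:
# flat reference dicts are nested into the SQuAD-style layout the CUAD scoring
# script expects, while predictions become an id -> texts mapping.
refs = [{"id": "q1", "answers": {"text": ["The seller:"], "answer_start": [143]}}]
pred_dict = {"q1": ["The seller:"]}
dataset = [
    {
        "paragraphs": [
            {"qas": [{"id": r["id"], "answers": [{"text": t} for t in r["answers"]["text"]]} for r in refs]}
        ]
    }
]
assert dataset[0]["paragraphs"][0]["qas"][0]["id"] == "q1"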
| 19 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
def list_field( default=None , metadata=None ):
return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class lowerCAmelCase__ :
a__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a__ : Optional[str] = field(
default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a__ : Optional[bool] = field(
default=__lowercase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
a__ : Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
a__ : Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
a__ : Optional[float] = field(
default=0.1 , metadata={
"""help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
} , )
a__ : Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout probability for all 1D convolutional layers in feature extractor."""} , )
a__ : Optional[float] = field(
default=0.05 , metadata={
"""help""": (
"""Propability of each feature vector along the time axis to be chosen as the start of the vector"""
"""span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
"""vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
)
} , )
a__ : Optional[float] = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class lowerCAmelCase__ :
a__ : Optional[str] = field(
default=__lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a__ : Optional[str] = field(
default="""train+validation""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
a__ : bool = field(
default=__lowercase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a__ : Optional[int] = field(
default=__lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a__ : Optional[int] = field(
default=__lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a__ : Optional[int] = field(
default=__lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of validation examples to this """
"""value if set."""
)
} , )
a__ : List[str] = list_field(
default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class lowerCAmelCase__ :
a__ : WavaVecaProcessor
a__ : Union[bool, str] = True
a__ : Optional[int] = None
a__ : Optional[int] = None
a__ : Optional[int] = None
a__ : Optional[int] = None
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
__lowerCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
__lowerCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
__lowerCamelCase = self.processor.pad(
SCREAMING_SNAKE_CASE__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
__lowerCamelCase = self.processor.pad(
labels=SCREAMING_SNAKE_CASE__ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
__lowerCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
__lowerCamelCase = labels
return batch
class lowerCAmelCase__ ( __lowercase ):
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : nn.Module , SCREAMING_SNAKE_CASE__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
__lowerCamelCase = self._prepare_inputs(SCREAMING_SNAKE_CASE__ )
if self.use_amp:
with autocast():
__lowerCamelCase = self.compute_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase = self.compute_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__lowerCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__lowerCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
__lowerCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(SCREAMING_SNAKE_CASE__ ).backward()
elif self.use_apex:
with amp.scale_loss(SCREAMING_SNAKE_CASE__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(SCREAMING_SNAKE_CASE__ )
else:
loss.backward()
return loss.detach()
def __magic_name__ ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__lowerCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
__lowerCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
__lowerCamelCase = f'''[{''.join(data_args.chars_to_ignore )}]'''
def remove_special_characters(__lowerCAmelCase : str ):
__lowerCamelCase = re.sub(__lowerCAmelCase , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
__lowerCamelCase = train_dataset.map(__lowerCAmelCase , remove_columns=['''sentence'''] )
__lowerCamelCase = eval_dataset.map(__lowerCAmelCase , remove_columns=['''sentence'''] )
def extract_all_chars(__lowerCAmelCase : Optional[int] ):
__lowerCamelCase = ''' '''.join(batch['''text'''] )
__lowerCamelCase = list(set(__lowerCAmelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
__lowerCamelCase = train_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , batch_size=-1 , keep_in_memory=__lowerCAmelCase , remove_columns=train_dataset.column_names , )
__lowerCamelCase = train_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , batch_size=-1 , keep_in_memory=__lowerCAmelCase , remove_columns=eval_dataset.column_names , )
__lowerCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
__lowerCamelCase = {v: k for k, v in enumerate(__lowerCAmelCase )}
vocab_dict['''|'''] = vocab_dict[''' ''']
del vocab_dict[" "]
vocab_dict['''[UNK]'''] = len(vocab_dict )
vocab_dict['''[PAD]'''] = len(vocab_dict )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase )
__lowerCamelCase = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
__lowerCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__lowerCamelCase = min(len(__lowerCAmelCase ) , data_args.max_train_samples )
__lowerCamelCase = train_dataset.select(range(__lowerCAmelCase ) )
if data_args.max_val_samples is not None:
__lowerCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
__lowerCamelCase = torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__lowerCAmelCase : Tuple ):
__lowerCamelCase , __lowerCamelCase = torchaudio.load(batch['''path'''] )
__lowerCamelCase = resampler(__lowerCAmelCase ).squeeze().numpy()
__lowerCamelCase = 1_6000
__lowerCamelCase = batch['''text''']
return batch
__lowerCamelCase = train_dataset.map(
__lowerCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__lowerCamelCase = eval_dataset.map(
__lowerCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__lowerCAmelCase : str ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
__lowerCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__lowerCAmelCase )
return batch
__lowerCamelCase = train_dataset.map(
__lowerCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
__lowerCamelCase = eval_dataset.map(
__lowerCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
__lowerCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__lowerCAmelCase : int ):
__lowerCamelCase = pred.predictions
__lowerCamelCase = np.argmax(__lowerCAmelCase , axis=-1 )
__lowerCamelCase = processor.tokenizer.pad_token_id
__lowerCamelCase = processor.batch_decode(__lowerCAmelCase )
# we do not want to group tokens when computing the metrics
__lowerCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__lowerCAmelCase )
__lowerCamelCase = wer_metric.compute(predictions=__lowerCAmelCase , references=__lowerCAmelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__lowerCamelCase = DataCollatorCTCWithPadding(processor=__lowerCAmelCase , padding=__lowerCAmelCase )
# Initialize our Trainer
__lowerCamelCase = CTCTrainer(
model=__lowerCAmelCase , data_collator=__lowerCAmelCase , args=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowerCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__lowerCamelCase = model_args.model_name_or_path
else:
__lowerCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__lowerCamelCase = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
__lowerCamelCase = train_result.metrics
__lowerCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCAmelCase )
)
__lowerCamelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) )
trainer.log_metrics('''train''' , __lowerCAmelCase )
trainer.save_metrics('''train''' , __lowerCAmelCase )
trainer.save_state()
# Evaluation
__lowerCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowerCamelCase = trainer.evaluate()
__lowerCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__lowerCAmelCase )
__lowerCamelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) )
trainer.log_metrics('''eval''' , __lowerCAmelCase )
trainer.save_metrics('''eval''' , __lowerCAmelCase )
return results
if __name__ == "__main__":
main()
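# Standalone sketch of the -100 label-padding trick used by the collator above:
# label positions where attention_mask == 0 are replaced with -100 so the loss
# ignores them. Tensor values are made up for illustration.
import torch

label_ids = torch.tensor([[5, 9, 2, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
labels = label_ids.masked_fill(attention_mask.ne(1), -100)
print(labels)  # tensor([[   5,    9,    2, -100, -100]])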
| 270 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( __lowercase ):
@staticmethod
@abstractmethod
def __A ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> str:
raise NotImplementedError()
@abstractmethod
def __A ( self : Optional[int] ) -> Union[str, Any]:
raise NotImplementedError()
| 270 | 1 |
from math import sqrt
def sum_of_divisors( n : int ):
'''simple docstring'''
total = 0
for i in range(1, int(sqrt(n ) + 1 ) ):
if n % i == 0 and i != sqrt(n ):
total += i + n // i
elif i == sqrt(n ):
total += i
return total - n
def solution( limit : int = 1_0000 ):
'''simple docstring'''
total = sum(
i
for i in range(1, limit )
if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
return total
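# Quick sanity check on the classic amicable pair (220, 284): each number's
# proper-divisor sum equals the other, so both are counted by solution(10000).
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220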
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 35 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
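# Illustrative sketch of the lazy-import pattern implemented by _LazyModule
# above, written standalone with the stdlib (the "json"/"dumps" names are just
# example targets): the real import only happens on first attribute access.
import importlib

class LazyAttr:
    def __init__(self, module_name: str, attr: str):
        self._module_name, self._attr = module_name, attr
        self._obj = None

    def resolve(self):
        if self._obj is None:  # import deferred until first use
            self._obj = getattr(importlib.import_module(self._module_name), self._attr)
        return self._obj

dumps = LazyAttr("json", "dumps")  # nothing imported yet
assert dumps.resolve()({"a": 1}) == '{"a": 1}'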
| 35 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_UpperCAmelCase : Tuple =False
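# Background sketch for the equivalence test below: DDPM and DDIM share the
# same forward (noising) process
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so with identical betas their add_noise outputs match and the training steps
# agree. Standalone illustration with assumed linear betas:
def _forward_process_demo():
    betas = torch.linspace(1e-4, 0.02, 1000)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    x0, eps, t = torch.randn(4, 3, 32, 32), torch.randn(4, 3, 32, 32), 10
    xt = alphas_cumprod[t] ** 0.5 * x0 + (1 - alphas_cumprod[t]) ** 0.5 * eps
    return xt.shape  # torch.Size([4, 3, 32, 32])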
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self , __lowercase=3_2 ) -> Tuple:
set_seed(0 )
model = UNetaDModel(sample_size=__lowercase , in_channels=3 , out_channels=3 )
optimizer = torch.optim.SGD(model.parameters() , lr=0.00_01 )
return model, optimizer
@slow
def lowercase_ ( self ) -> Dict:
device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
ddpm_scheduler = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
ddim_scheduler = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
clean_images = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
noise = [torch.randn((4, 3, 3_2, 3_2) ).to(device ) for _ in range(4 )]
timesteps = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(device ) for _ in range(4 )]
# train with a DDPM scheduler
model , optimizer = self.get_model_optimizer(resolution=3_2 )
model.train().to(device )
for i in range(4 ):
optimizer.zero_grad()
ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
model , optimizer = self.get_model_optimizer(resolution=3_2 )
model.train().to(device )
for i in range(4 ):
optimizer.zero_grad()
ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# the two runs should agree, since DDPM and DDIM share the same forward (noising) process
self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5 ) )
self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5 ) )
| 262 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Any = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 95 | 0 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase__ = get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
__A = '''dummy_data'''
__A = '''datasets'''
__A = False
def __init__( self : int , lowercase_ : str , lowercase_ : str , lowercase_ : Union[Version, str] , lowercase_ : Optional[str] = None , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[List[Callable]] = None , ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = dataset_name
_UpperCamelCase = cache_dir
_UpperCamelCase = use_local_dummy_data
_UpperCamelCase = config
# download_callbacks take a single url as input
_UpperCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_UpperCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_UpperCamelCase = str(lowercase_)
# to be downloaded
_UpperCamelCase = None
_UpperCamelCase = None
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
if self._dummy_file is None:
_UpperCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name)
@property
def __UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , "dummy_data.zip")
def __UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
_UpperCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_UpperCamelCase = cached_path(
lowercase_ , cache_dir=self.cache_dir , extract_compressed_file=lowercase_ , force_extract=lowercase_)
return os.path.join(lowercase_ , self.dummy_file_name)
@property
def __UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def __UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
if self._bucket_url is None:
_UpperCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/"))
return self._bucket_url
@property
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/").split("/")[:-1])
def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict , *lowercase_ : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_UpperCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_UpperCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowercase_ , lowercase_):
return self.create_dummy_data_dict(lowercase_ , lowercase_)
elif isinstance(lowercase_ , (list, tuple)):
return self.create_dummy_data_list(lowercase_ , lowercase_)
else:
return self.create_dummy_data_single(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any , *lowercase_ : List[str]) -> List[Any]:
"""simple docstring"""
return self.download_and_extract(lowercase_)
def __UpperCAmelCase ( self : Tuple , lowercase_ : int , lowercase_ : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.download_and_extract(lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , *lowercase_ : Tuple , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
return path
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
return {}
def __UpperCAmelCase ( self : Tuple , lowercase_ : Tuple , lowercase_ : Any) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowercase_ , lowercase_):
for single_url in single_urls:
download_callback(lowercase_)
else:
_UpperCamelCase = single_urls
download_callback(lowercase_)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowercase_ , lowercase_):
_UpperCamelCase = [os.path.join(lowercase_ , urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
else:
_UpperCamelCase = single_urls
_UpperCamelCase = os.path.join(lowercase_ , urllib.parse.quote_plus(Path(lowercase_).name))
_UpperCamelCase = value
# make sure that values are unique
if all(isinstance(lowercase_ , lowercase_) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
_UpperCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCAmelCase ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_UpperCamelCase = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowercase_)) for url in data_url)
_UpperCamelCase = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
_UpperCamelCase = [data_url[0]] * len(lowercase_)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowercase_)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCamelCase = os.path.join(lowercase_ , urllib.parse.quote_plus(single_url.split("/")[-1]))
dummy_data_list.append(lowercase_)
return dummy_data_list
def __UpperCAmelCase ( self : Dict , lowercase_ : List[str] , lowercase_ : Dict) -> Tuple:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(lowercase_)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCamelCase = os.path.join(lowercase_ , urllib.parse.quote_plus(data_url.split("/")[-1]))
if os.path.exists(lowercase_) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
pass
def __UpperCAmelCase ( self : List[str] , lowercase_ : Union[str, Any]) -> Tuple:
"""simple docstring"""
def _iter_archive_members(lowercase_ : Any):
# this preserves the order of the members inside the ZIP archive
_UpperCamelCase = Path(self.dummy_file).parent
_UpperCamelCase = path.relative_to(lowercase_)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
_UpperCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(lowercase_)
_UpperCamelCase = Path(lowercase_)
_UpperCamelCase = _iter_archive_members(lowercase_) if self.use_local_dummy_data else path.rglob("*")
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__")):
yield file_path.relative_to(lowercase_).as_posix(), file_path.open("rb")
def __UpperCAmelCase ( self : Tuple , lowercase_ : str) -> Dict:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
_UpperCamelCase = [paths]
for path in paths:
if os.path.isfile(lowercase_):
if os.path.basename(lowercase_).startswith((".", "__")):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowercase_):
if os.path.basename(lowercase_).startswith((".", "__")):
continue
dirnames.sort()
for filename in sorted(lowercase_):
if filename.startswith((".", "__")):
continue
yield os.path.join(lowercase_ , lowercase_)
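# Hedged illustration (made-up URL) of the key-naming rule used in
# create_dummy_data_dict / create_dummy_data_list above: each URL maps to
# <dummy_path>/<url-quoted last path component>.
if __name__ == "__main__":
    example_url = "https://host/data/train.txt?rev=2"
    print(os.path.join("dummy_data", urllib.parse.quote_plus(example_url.split("/")[-1])))
    # -> dummy_data/train.txt%3Frev%3D2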
| 63 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
'''simple docstring'''
config = MobileBertConfig.from_json_file(mobilebert_config_file )
print(f'Building PyTorch model from configuration: {config}' )
model = MobileBertForPreTraining(config )
# Load weights from tf checkpoint
model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
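# Hedged usage sketch (paths are placeholders, not real files): the script
# above is meant to be driven from the command line, e.g.
#
#   python convert_mobilebert_checkpoint.py \
#     --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./pytorch_model.bin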
| 63 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCAmelCase : List[Any] =logging.get_logger(__name__)
class a_ ( _lowerCAmelCase ):
def __init__( self : Optional[int] , *lowercase : Any , **lowercase : List[Any] ):
"""simple docstring"""
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , lowercase , )
super().__init__(*lowercase , **lowercase )
| 223 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a_ ( _lowerCAmelCase ):
__A = "vit_mae"
def __init__( self : Any , lowercase : int=768 , lowercase : Tuple=12 , lowercase : str=12 , lowercase : Optional[Any]=3_072 , lowercase : List[Any]="gelu" , lowercase : Tuple=0.0 , lowercase : Union[str, Any]=0.0 , lowercase : str=0.02 , lowercase : Optional[int]=1e-1_2 , lowercase : List[Any]=224 , lowercase : str=16 , lowercase : List[str]=3 , lowercase : Optional[Any]=True , lowercase : int=16 , lowercase : Optional[Any]=512 , lowercase : Optional[Any]=8 , lowercase : Optional[Any]=2_048 , lowercase : List[str]=0.75 , lowercase : str=False , **lowercase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Any = hidden_size
lowercase_ :Optional[Any] = num_hidden_layers
lowercase_ :Optional[Any] = num_attention_heads
lowercase_ :int = intermediate_size
lowercase_ :Optional[int] = hidden_act
lowercase_ :str = hidden_dropout_prob
lowercase_ :Optional[Any] = attention_probs_dropout_prob
lowercase_ :str = initializer_range
lowercase_ :Optional[int] = layer_norm_eps
lowercase_ :str = image_size
lowercase_ :Union[str, Any] = patch_size
lowercase_ :Dict = num_channels
lowercase_ :Any = qkv_bias
lowercase_ :Optional[int] = decoder_num_attention_heads
lowercase_ :Optional[Any] = decoder_hidden_size
lowercase_ :Union[str, Any] = decoder_num_hidden_layers
lowercase_ :List[Any] = decoder_intermediate_size
lowercase_ :Optional[Any] = mask_ratio
lowercase_ :Optional[Any] = norm_pix_loss
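# Back-of-envelope check using the defaults above (values taken from the
# signature, the arithmetic is an illustrative assumption): a 224x224 image
# with 16x16 patches gives 196 patches, and mask_ratio=0.75 leaves about 49
# visible patches for the encoder.
if __name__ == "__main__":
    num_patches = (224 // 16) ** 2
    print(num_patches, int(num_patches * (1 - 0.75)))  # 196 49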
| 223 | 1 |
import pprint
import requests
UpperCAmelCase_ = """https://zenquotes.io/api"""
def quote_of_the_day() -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def random_quotes() -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
UpperCAmelCase_ = random_quotes()
pprint.pprint(response)
| 295 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric ):
    def _info( self ) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
                } ) , )
    def _compute( self , predictions , references , min_len = 1 , max_len = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
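if __name__ == "__main__":
    # Minimal sanity check (a sketch, not part of the metric): call the
    # underlying NLTK scorer directly with the same argument mapping that
    # `_compute` above uses, bypassing `datasets.load_metric`.
    hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
    reference = ["the", "cat", "is", "on", "the", "mat"]
    print(gleu_score.corpus_gleu(list_of_references=[[reference]], hypotheses=[hypothesis]))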
| 295 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
A : Dict = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    """simple docstring"""
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
            generator = generator.manual_seed(0 )
            new_image = pipe(
                prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
            assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text2img( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
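# Standalone usage sketch outside unittest (assumes a CUDA GPU and network
# access to the Hub; mirrors the fp16 test above):
#     pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
#         "shi-labs/versatile-diffusion", torch_dtype=torch.float16
#     ).to("cuda")
#     image = pipe("A painting of a squirrel eating a burger").images[0]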
| 184 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max : bool = True , max_x : float = math.inf , min_x : float = -math.inf , max_y : float = math.inf , min_y : float = -math.inf , visualization : bool = False , start_temperate : float = 100 , rate_of_decrease : float = 0.01 , threshold_temp : float = 1 , ) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
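# Worked example of the Metropolis acceptance rule above (sketch): with
# current_temp = 100 and a worsening move where change = -5 (after the sign
# flip for minimisation), the acceptance probability is
#     e ** (change / current_temp) = e ** (-5 / 100) ≈ 0.951
# so early on nearly every worsening move is accepted; as the temperature
# decays by `rate_of_decrease` each round, the same move becomes
# exponentially less likely to be taken.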
if __name__ == "__main__":
    def test_f1(x , y ):
        """simple docstring"""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
    )

    def test_f2(x , y ):
        """simple docstring"""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f'{local_min.score()}'
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f'{local_min.score()}'
    )
| 184 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    """simple docstring"""
    data_dir = Path("data_bin" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ), checkpoint_file=Path(xmod_checkpoint_path ).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir ), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ), src_dict=str(data_dir / "dict.txt" ), )
    xmod.eval()  # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError("Dimensions of self-attention weights do not match." )
        self_attn.query.weight = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match." )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError("Lists of language adapters do not match." )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape, their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'max_absolute_diff = {max_absolute_diff}' )  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3 )
    print("Do both models output the same tensors?", "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    Path(pytorch_dump_folder_path ).mkdir(parents=True, exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__magic_name__: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__magic_name__: Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
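    # Example invocation (sketch; both paths are placeholders):
    #   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
    #       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
    #       --pytorch_dump_folder_path ./xmod-base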
| 370 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__magic_name__: Union[str, Any] = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass( cls ):
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_cpu_offload_forward_pass( self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
    def test_inference_batch_consistent( self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
    def test_dict_tuple_outputs_equivalent( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_pt_np_pil_outputs_equivalent( self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
    def test_save_load_local( self ):
        super().test_save_load_local(expected_max_difference=5e-4 )
    def test_save_load_optional_components( self ):
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16( self ):
        generator = torch.manual_seed(51 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.to("cuda" )
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt , token_indices=token_indices , guidance_scale=7.5 , generator=generator , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
        assert np.abs((expected_image - image).max() ) < 5e-1
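# Note on `token_indices` as exercised above (a reading of the test inputs,
# not an API reference): the indices are positions in the tokenized prompt,
# so for "a cat and a frog" the values [2, 5] pick out "cat" and "frog" once
# the BOS token at position 0 is counted, and [5, 7] does the same for
# "elephant" and "glasses" in the slow test's prompt.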
| 138 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_xlm_roberta_base( self ):
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
    @slow
    def test_xlm_roberta_large( self ):
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
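# The atol=1e-3 above is deliberately loose: the expected values were captured
# once from the original fairseq implementation (see the commented-out
# torch.hub snippets), and small numerical drift across torch/CUDA versions
# is expected in the last hidden states.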
| 205 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.emoji_file , "w" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_pretokenized_inputs( self ):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input( self ):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input( self ):
        pass  # TODO add if relevant
    def test_full_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_token )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
    def test_token_bagging( self ):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text )
        output_text = tokenizer.decode(tokens )
        self.assertEqual(output_text , expected_text )
    @slow
    def test_prefix_input_token_ids( self ):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text )
        tokens_2 = tokenizer.encode("" , prefix_text=prefix_text + input_text )
        tokens_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        output_text_1 = tokenizer.decode(tokens_1 )
        output_text_2 = tokenizer.decode(tokens_2 )
        output_text_3 = tokenizer.decode(tokens_3 )
        self.assertEqual(output_text_1 , expected_text )
        self.assertEqual(output_text_2 , expected_text )
        self.assertEqual(output_text_3 , expected_text )
    @slow
    def test_token_type_ids( self ):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_id_2 = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
        type_id_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_id_1 , expected_mask_1 )
        self.assertListEqual(type_id_2 , expected_mask_2 )
        self.assertListEqual(type_id_3 , expected_mask_3 )
    @slow
    def test_prefix_tokens( self ):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        x_token_1 = tokenizer.encode("あンいワ" )
        x_token_2 = tokenizer.encode("" , prefix_text="あンいワ" )
        x_token_3 = tokenizer.encode("いワ" , prefix_text="あン" )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_2[-1] )  # SEG token
        self.assertEqual(x_token_2[1] , x_token_3[3] )  # SEG token
    @slow
    def test_batch_encode( self ):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        expected_outputs = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_typeids )
        self.assertListEqual(x_token.attention_mask , expected_attmask )
        self.assertListEqual(x_token_2.input_ids , expected_outputs )
        self.assertListEqual(x_token_2.token_type_ids , expected_typeids )
        self.assertListEqual(x_token_2.attention_mask , expected_attmask )
    def test_conversion_reversible( self ):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def test_padding_different_model_input_name( self ):
        # tokenizer has no padding token
        pass
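# The token_type_ids convention exercised above (as the expected masks show):
# positions belonging to the prefix segment are marked 1 and the main text 0,
# with one extra position for the separator token, so
# `tokenizer("", prefix_text=...)` marks everything as prefix while
# `tokenizer(text, prefix_text=prefix)` splits the two segments.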
| 35 | 0 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase ):
    """simple docstring"""
    def test_sorted( self ):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight( self ):
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
    def test_negative_weight_value( self ):
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )
    def test_negative_profit_value( self ):
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )
    def test_null_max_weight( self ):
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
    def test_unequal_list_length( self ):
        self.assertRaisesRegex(
            ValueError , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
    unittest.main()
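# Why test_sorted expects 210 (worked arithmetic): the fixture's total weight
# is 2 + 4 + 6 + 8 + 10 + 12 = 42, which fits entirely under max_weight = 100,
# so the greedy knapsack takes every item and the profit is
# 10 + 20 + 30 + 40 + 50 + 60 = 210.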
| 52 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=32 , n_head=4 , d_head=8 , d_inner=37 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=512 , type_vocab_size=3 , initializer_std=0.02 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelBaseModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForPreTraining(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForMaskedLM(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForQuestionAnswering(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFFunnelModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFFunnelModelTester(self , base=True )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_base_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
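# Why the base-model shapes above move between 2 and 3 (a sketch of the
# intuition, grounded in the asserts): TFFunnelBaseModel returns the pooled,
# shortened sequence, and the `truncate_seq` / `separate_cls` flags change how
# the [CLS] position and odd sequence lengths are handled during pooling, so
# the final length for this 7-token input is 2 by default, 3 with
# truncate_seq=False, and 2 again with separate_cls=False.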
| 52 | 1 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col] ), row2) for row2 in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size ):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1 ):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1 ):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_points: list[int] ) -> Callable[[int], int]:
    size: int = len(y_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs: Matrix = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
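    # Worked example (sketch): for u(n) = n**3 the sequence starts 1, 8, 27, ...
    # The degree-0 fit through (1, 1) predicts a constant 1, so its first
    # incorrect term is at n = 2 and contributes FIT = 1; the degree-1 fit
    # through (1, 1) and (2, 8) is 7n - 6, whose first incorrect term is
    # 7 * 3 - 6 = 15 at n = 3, matching the values in the Project Euler 101
    # problem statement.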
| 92 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
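    # Usage sketch: with the lazy module installed in sys.modules, a statement
    # like `from transformers import ASTModel` resolves the name through
    # _LazyModule, so the heavy torch-dependent submodule is only imported at
    # first attribute access; until then only the plain-string
    # _import_structure dict is evaluated.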
| 92 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 175 |
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    """Calculates the surface area of a regular dodecahedron with edge length `edge`."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Calculates the volume of a regular dodecahedron with edge length `edge`."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 | 1 |
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    # lateral surface needs the slant height, from the Pythagorean theorem
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Calculates the area of a triangle from its three sides using Heron's formula."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 157 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 39 | 0 |
lowercase : int = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowercase : Tuple = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowercase : Any = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 362 |
def remove_digit(num: int) -> int:
    """Returns the biggest number obtainable by deleting exactly one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 225 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 263 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 263 | 1 |
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 360 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 176 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. `1 / 255` to bring pixel values into `[0, 1]`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
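
# Hypothetical usage sketch (the class name above is my reconstruction and the
# file name is illustrative):
#
#   from PIL import Image
#
#   image_processor = CLIPImageProcessor()
#   batch = image_processor(images=Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above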
| 290 |
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator.
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
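    # Illustrative calls (my examples, not from the original module):
    print(split("apple#banana#cherry", "#"))  # ['apple', 'banana', 'cherry']
    print(split("Hello there"))  # ['Hello', 'there']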
| 260 | 0 |
def get_1s_count(number: int) -> int:
    """Counts the set bits (1s) of `number` using Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
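    # Illustrative trace of `number &= number - 1` (my example, not part of the
    # original module): 0b10110 -> 0b10100 -> 0b10000 -> 0, i.e. one iteration
    # per set bit.
    assert get_1s_count(0b10110) == 3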
| 293 |
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase : int = "docs/source/en/_toctree.yml"
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__)
for doc in model_doc:
counts[doc["local"]] += 1
__snake_case: Dict = [key for key, value in counts.items() if value > 1]
__snake_case: Optional[Any] = []
for duplicate_key in duplicates:
__snake_case: Tuple = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key})
if len(SCREAMING_SNAKE_CASE__) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""")
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]})
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1])
# Sort
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 293 | 1 |
def solution(power: int = 1_000) -> int:
    """Returns the sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
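
# Worked example (mine, not part of the original file):
# 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26, so:
assert solution(15) == 26
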
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 214 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 214 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test, predictions)}''')
    print(f'''Mean Square Error : {mean_squared_error(y_test, predictions)}''')
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 352 |
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''onnx'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''onnx'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''onnx'''])
| 99 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
"Pregnancy": 1_6_8_6_2_9,
"Christianity": 7_6_7_5,
"Explain": 1_0_6_4_2_3,
"Fitness": 6_3_4_4_0,
"Saving": 6_3_1_6_3,
"Ask": 2_7_1_7_1,
"Ass": 9_5_9_8_5,
"Joke": 1_6_3_5_0_9,
"Questions": 4_5_6_2_2,
"Thoughts": 4_9_6_0_5,
"Retail": 5_2_3_4_2,
"Feminism": 1_6_4_3_3_8,
"Writing": 1_1_9_9_2,
"Atheism": 1_9_2_2_6_3,
"Netflix": 4_8_6_1_6,
"Computing": 3_9_6_3_9,
"Opinion": 4_3_2_1_3,
"Alone": 4_4_9_6_7,
"Funny": 5_8_9_1_7,
"Gaming": 4_0_3_5_8,
"Human": 4_0_8_8,
"India": 1_3_3_1,
"Joker": 7_7_1_3_8,
"Diet": 3_6_2_0_6,
"Legal": 1_1_8_5_9,
"Norman": 4_9_3_9,
"Tip": 7_2_6_8_9,
"Weight": 5_2_3_4_3,
"Movies": 4_6_2_7_3,
"Running": 2_3_4_2_5,
"Science": 2_0_9_0,
"Horror": 3_7_7_9_3,
"Confession": 6_0_5_7_2,
"Finance": 1_2_2_5_0,
"Politics": 1_6_3_6_0,
"Scary": 1_9_1_9_8_5,
"Support": 1_2_6_5_4,
"Technologies": 3_2_5_1_6,
"Teenage": 6_6_1_6_0,
"Event": 3_2_7_6_9,
"Learned": 6_7_4_6_0,
"Notion": 1_8_2_7_7_0,
"Wikipedia": 3_7_5_8_3,
"Books": 6_6_6_5,
"Extract": 7_6_0_5_0,
"Confessions": 1_0_2_7_0_1,
"Conspiracy": 7_5_9_3_2,
"Links": 6_3_6_7_4,
"Narcissus": 1_5_0_4_2_5,
"Relationship": 5_4_7_6_6,
"Relationships": 1_3_4_7_9_6,
"Reviews": 4_1_6_7_1,
"News": 4_2_5_6,
"Translation": 2_6_8_2_0,
"multilingual": 1_2_8_4_0_6,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
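
# Illustrative example (mine, not from the original module):
#   get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}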
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="""utf-8""") as merges_handle:
            merges = merges_handle.read().split("""\n""")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + """</w>"""])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (most frequent) bigram first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("""inf""")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = """@@ """.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"""\S+\n?""", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(""" """)))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = """ """.join(tokens).replace("""@@ """, """""").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        merge_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])

        with open(vocab_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + """\n""")

        index = 0
        with open(merge_file, """w""", encoding="""utf-8""") as writer:
            writer.write("""#version: 0.2\n""")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        """ Please check that the tokenizer is not corrupted!""")
                    index = token_index
                writer.write(""" """.join(bpe_tokens) + """\n""")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 109 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with the modified Euler (Heun) method: predict with an Euler step, then average the slopes."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
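    # Quick illustrative run (my example, not part of the original module):
    # integrate y' = y on [0, 1] from y(0) = 1; the final value should be
    # close to e ~ 2.718.
    approximation = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    print(approximation[-1])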
| 109 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Check whether all attributes of `config_class` are used in at least one modeling file."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
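# Illustrative sanity check (ours, not part of the original script): `DummyConfig`
# and the source string below are hypothetical. An attribute counts as "used" as
# soon as any modeling source mentions it via `config.xxx` or
# `getattr(config, "xxx", ...)`, so this call is expected to return True.
#
#   class DummyConfig(PretrainedConfig):
#       def __init__(self, hidden_size=32, **kwargs):
#           super().__init__(**kwargs)
#           self.hidden_size = hidden_size
#
#   check_attribute_being_used(DummyConfig, ["hidden_size"], 32, ["x = config.hidden_size"])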
| 35 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
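# Illustrative usage (ours, not part of the original module): checks the installed
# version of a pinned dependency against `dependency_versions_table.py` and raises
# if it does not satisfy the pin.
#
#   dep_version_check("tqdm")
#   dep_version_check("numpy", hint="numpy is required for array ops")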
| 35 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 135 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using all printable characters."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password that is guaranteed to contain the given characters."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
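# Quick illustrative check (ours, not part of the original module):
# "Aa1!aaaa" is 8 characters long and contains an uppercase letter, a lowercase
# letter, a digit and a punctuation character, so it passes the strength check,
# while "short" fails on length alone.
#
#   assert is_strong_password("Aa1!aaaa") is True
#   assert is_strong_password("short") is False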
| 337 | 0 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """
    Count how many of the first `n` expansions of the continued fraction for
    sqrt(2) have a numerator with more digits than the denominator
    (Project Euler problem 57).
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
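# Sanity note (ours): the expansions run 3/2, 7/5, 17/12, 41/29, 99/70, 239/169,
# 577/408, 1393/985, ... so the first fraction whose numerator has more digits
# than its denominator is the 8th one, i.e. solution(8) == 1.
#
#   assert solution(8) == 1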
| 355 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` using pseudo-random numbers as a one-time pad."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt a cipher/key pair produced by `encrypt`."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
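# Round-trip property (ours, illustrative): since c = (p + k) * k, decryption
# recovers p exactly via (c - k**2) / k, so decrypt(encrypt(text)) == text.
#
#   assert Onepad().decrypt(*Onepad().encrypt("abc")) == "abc"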
| 287 | 0 |
from __future__ import annotations
def average(nums: list) -> float:
    """
    Return the mean of a list of numbers.

    >>> average([2, 4, 6])
    4.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 87 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
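# Note (ours, illustrative): unlike most tokenizers, LED carries an extra
# `global_attention_mask` alongside `attention_mask`; as the global-attention
# test above checks, `tokenizer.pad` pads that mask too (with -1 for padding
# positions), so callers can build it before padding without re-aligning it.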
| 87 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : Optional[int] = BlipImageProcessor()
lowerCAmelCase_ : Any = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowerCAmelCase_ : Union[str, Any] = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
lowerCAmelCase_ : Optional[int] = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : Optional[Any] , **a_ : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def lowerCamelCase ( self : int , **a_ : str ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def lowerCamelCase ( self : Optional[Any] , **a_ : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def lowerCamelCase ( self : str ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : str = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowerCAmelCase_ : int = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : Dict = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase_ : Dict = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
lowerCAmelCase_ : Optional[Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : List[str] = self.get_image_processor()
lowerCAmelCase_ : Optional[int] = self.get_tokenizer()
lowerCAmelCase_ : Optional[int] = self.get_qformer_tokenizer()
lowerCAmelCase_ : Tuple = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase_ : Dict = self.prepare_image_inputs()
lowerCAmelCase_ : List[Any] = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase_ : List[Any] = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Any = self.get_image_processor()
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = self.get_qformer_tokenizer()
lowerCAmelCase_ : int = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase_ : Dict = "lower newer"
lowerCAmelCase_ : str = processor(text=__UpperCAmelCase )
lowerCAmelCase_ : str = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
lowerCAmelCase_ : Any = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : Any = self.get_image_processor()
lowerCAmelCase_ : Dict = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = self.get_qformer_tokenizer()
lowerCAmelCase_ : Union[str, Any] = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase_ : Any = "lower newer"
lowerCAmelCase_ : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : List[str] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : Optional[Any] = self.get_image_processor()
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : Tuple = self.get_qformer_tokenizer()
lowerCAmelCase_ : Tuple = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ : Dict = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = self.get_image_processor()
lowerCAmelCase_ : str = self.get_tokenizer()
lowerCAmelCase_ : Union[str, Any] = self.get_qformer_tokenizer()
lowerCAmelCase_ : Tuple = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase_ : Dict = "lower newer"
lowerCAmelCase_ : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 354 |
"""simple docstring"""
from __future__ import annotations
import math
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : Dict , a_ : int ):
lowerCAmelCase_ : Union[str, Any] = size
# approximate the overall size of segment tree with given value
lowerCAmelCase_ : Union[str, Any] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowerCAmelCase_ : int = [0 for i in range(0 , 4 * size )]
lowerCAmelCase_ : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCamelCase ( self : List[Any] , a_ : int ):
return idx * 2
def lowerCamelCase ( self : Tuple , a_ : int ):
return idx * 2 + 1
def lowerCamelCase ( self : Tuple , a_ : int , a_ : int , a_ : int , a_ : list[int] ):
if left_element == right_element:
lowerCAmelCase_ : Tuple = a[left_element - 1]
else:
lowerCAmelCase_ : Tuple = (left_element + right_element) // 2
self.build(self.left(a_ ) , a_ , a_ , a_ )
self.build(self.right(a_ ) , mid + 1 , a_ , a_ )
lowerCAmelCase_ : int = max(
self.segment_tree[self.left(a_ )] , self.segment_tree[self.right(a_ )] )
def lowerCamelCase ( self : Union[str, Any] , a_ : int , a_ : int , a_ : int , a_ : int , a_ : int , a_ : int ):
if self.flag[idx] is True:
lowerCAmelCase_ : Dict = self.lazy[idx]
lowerCAmelCase_ : Optional[Any] = False
if left_element != right_element:
lowerCAmelCase_ : str = self.lazy[idx]
lowerCAmelCase_ : Dict = self.lazy[idx]
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : Union[str, Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowerCAmelCase_ : Dict = val
if left_element != right_element:
lowerCAmelCase_ : Union[str, Any] = val
lowerCAmelCase_ : Dict = val
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : List[str] = True
return True
lowerCAmelCase_ : Optional[Any] = (left_element + right_element) // 2
self.update(self.left(a_ ) , a_ , a_ , a_ , a_ , a_ )
self.update(self.right(a_ ) , mid + 1 , a_ , a_ , a_ , a_ )
lowerCAmelCase_ : int = max(
self.segment_tree[self.left(a_ )] , self.segment_tree[self.right(a_ )] )
return True
def lowerCamelCase ( self : int , a_ : int , a_ : int , a_ : int , a_ : int , a_ : int ):
if self.flag[idx] is True:
lowerCAmelCase_ : Union[str, Any] = self.lazy[idx]
lowerCAmelCase_ : Optional[int] = False
if left_element != right_element:
lowerCAmelCase_ : int = self.lazy[idx]
lowerCAmelCase_ : int = self.lazy[idx]
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Dict = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowerCAmelCase_ : Any = (left_element + right_element) // 2
lowerCAmelCase_ : Union[str, Any] = self.query(self.left(a_ ) , a_ , a_ , a_ , a_ )
lowerCAmelCase_ : List[str] = self.query(self.right(a_ ) , mid + 1 , a_ , a_ , a_ )
return max(a_ , a_ )
def __str__( self : str ):
return str([self.query(1 , 1 , self.size , a_ , a_ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
lowercase__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowercase__ = 15
lowercase__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 161 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DanceDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
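# Note (ours, illustrative): DanceDiffusionPipeline is an unconditional *audio*
# pipeline, so `output.audios` is a float array of shape
# (batch_size, channels, sample_size) — here (1, 2, unet.sample_size) — rather
# than the (batch, height, width, channels) images most diffusers pipelines return.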
| 65 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)

    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator,
        batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator,
        batch_size=batch_size, shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
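# Example invocation (ours, illustrative; the script name, paths and bucket names
# are placeholders — all flags used below are defined in parse_args() above):
#
#   python run_mlm_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-checkpoints \
#       --bfloat16
#
# On a TPU VM, add `--tpu_name local`; to debug on CPU/GPU, add `--no_tpu`.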
| 146 | 0 |
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 198 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim:, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
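# Minimal sanity check of the slicing used above (dummy shapes): a fused qkv
# matrix of shape (3*dim, dim) splits row-wise into query, key and value, in order.
_dim = 4
_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
assert torch.equal(torch.cat([_qkv[:_dim, :], _qkv[_dim : 2 * _dim, :], _qkv[-_dim:, :]], dim=0), _qkv)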
def read_in_decoder_q_k_v(state_dict, config):
    """simple docstring"""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """simple docstring"""
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path, "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_swin_q_k_v(state_dict, config.backbone_config )
    read_in_decoder_q_k_v(state_dict, config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = "ade" in model_name
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )
    inputs = image_processor(image, return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:", outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(f"""nielsr/{model_name}""" )
        image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
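    # Example invocation (paths illustrative, script filename assumed):
    #   python convert_maskformer_checkpoint.py \
    #       --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
    #       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade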
| 198 | 1 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE : Any = """sshleifer/student_marian_en_ro_6_1"""
__SCREAMING_SNAKE_CASE : int = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus ):
'''simple docstring'''
    def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"] , float )
            assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist( self ):
        self.run_seqaseq_quick()
@require_torch_multi_gpu
    def test_run_seqaseq_dp( self ):
        self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
    def test_run_seqaseq_ddp( self ):
        self.run_seqaseq_quick(distributed=True )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_sharded_ddp( self ):
        self.run_seqaseq_quick(distributed=True , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16( self ):
        self.run_seqaseq_quick(distributed=True , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_fully_sharded_ddp( self ):
        self.run_seqaseq_quick(distributed=True , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=False )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16( self ):
        self.run_seqaseq_quick(
            distributed=True , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=False )
@require_apex
@require_torch_gpu
    def test_run_seqaseq_apex( self ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica( self , experiment_id ):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data["extra_args_str"] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data["n_matches"] )
@slow
    def test_run_seqaseq( self ):
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seqaseq_bnb( self ):
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str ) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , "trainer_state.json" ) ).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
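        # Sanity arithmetic for the comment above: (8 - 2) bytes saved per
        # quantized parameter * 25e6 parameters = 150e6 bytes ~= 143MiB, so a
        # 120MB threshold leaves a comfortable margin.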
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
            f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
            f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
        self.assertEqual(
            loss_orig , loss_bnb , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
    def run_trainer( self , max_len: int , model_name: str , num_train_epochs: int , learning_rate: float = 3E-3 , optim: str = "adafactor" , distributed: bool = False , extra_args_str: str = None , eval_steps: int = 0 , predict_with_generate: bool = True , do_train: bool = True , do_eval: bool = True , do_predict: bool = True , n_gpus_to_use: int = None , ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        args_eval = F"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps )}
        """.split()
        args_predict = "\n            --do_predict\n        ".split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F"""--optim {optim}""".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys , "argv" , testargs ):
                main()
        return output_dir
| 31 |
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    """simple docstring"""
    result = 1  # keeps the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """simple docstring"""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """simple docstring"""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """simple docstring"""
    return catalan_number(node_count) * factorial(node_count)
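# Example values: catalan_number(5) == 42 (the number of binary search trees
# on 5 nodes), and binary_tree_count(5) == 42 * factorial(5) == 5040.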
if __name__ == "__main__":
    node_count = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 31 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    '''simple docstring'''
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}')
    print(F'The numpy value of pi is {pi}')
    print(F'The total error is {abs(pi - pi_estimate)}')
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0, ) -> float:
    '''simple docstring'''
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
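# For example, area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)
# converges to the exact integral 1/3 as the iteration count grows.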
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    '''simple docstring'''
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(F'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {expected_value}')
    print(F'Total error is {abs(estimated_value - expected_value)}')
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    '''simple docstring'''
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {pi}')
    print(F'Total error is {abs(estimated_value - pi)}')
    print("******************")
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 366 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    '''simple docstring'''
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__( self , images: Union[str, List[str], "Image", List["Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
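    # Usage sketch (model name illustrative): a pipeline created via
    #   pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    # and called with candidate_labels=["cat", "dog"] routes those keyword
    # arguments through _sanitize_parameters above into preprocess below.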
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
| 151 | 0 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    '''simple docstring'''
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
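# `solution` computes the last `digits` digits of the hyperexponentiation
# (tetration) base^^height; e.g. solution(3, 2, 8) == 27 since 3^^2 == 3**3.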
if __name__ == "__main__":
print(F'{solution() = }')
| 273 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig ):
    model_type = '''big_bird'''

    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=4_0_9_6 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=6_6 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=6_4 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
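# Usage sketch (standard config API): BigBirdConfig() defaults to the sparse
# attention_type="block_sparse" with block_size=64; full quadratic attention is
# selected with BigBirdConfig(attention_type="original_full").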
class BigBirdOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 273 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
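# Note on these defaults: with shortest_edge=30 and crop_pct=0.9 the processor
# first resizes the shortest edge to int(30 / 0.9) == 33 pixels and then
# center-crops to 30x30, mirroring the PoolFormer evaluation recipe.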
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''crop_pct''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 177 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
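# With this lazy-module pattern, `import transformers` stays cheap: the
# torch-dependent submodules above are only imported when one of their symbols
# (e.g. ConditionalDetrModel) is first accessed.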
| 177 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_lxmert_fast'''] = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lxmert'''] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_lxmert'''] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 99 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''MobileNetV1Config'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''google/mobilenet_v1_1.0_224'''
_EXPECTED_OUTPUT_SHAPE = [1, 1_024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''google/mobilenet_v1_1.0_224'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model , config , tf_weights=None ):
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = '''MobilenetV1/Conv2d_0/'''
    tf_to_pt_map[prefix + '''weights'''] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + '''BatchNorm/beta'''] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + '''BatchNorm/gamma'''] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + '''BatchNorm/moving_mean'''] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + '''BatchNorm/moving_variance'''] = backbone.conv_stem.normalization.running_var
    for i in range(1_3 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        tf_to_pt_map[prefix + '''depthwise_weights'''] = pointer.convolution.weight
        tf_to_pt_map[prefix + '''BatchNorm/beta'''] = pointer.normalization.bias
        tf_to_pt_map[prefix + '''BatchNorm/gamma'''] = pointer.normalization.weight
        tf_to_pt_map[prefix + '''BatchNorm/moving_mean'''] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + '''BatchNorm/moving_variance'''] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        tf_to_pt_map[prefix + '''weights'''] = pointer.convolution.weight
        tf_to_pt_map[prefix + '''BatchNorm/beta'''] = pointer.normalization.bias
        tf_to_pt_map[prefix + '''BatchNorm/gamma'''] = pointer.normalization.weight
        tf_to_pt_map[prefix + '''BatchNorm/moving_mean'''] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + '''BatchNorm/moving_variance'''] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
        tf_to_pt_map[prefix + '''weights'''] = model.classifier.weight
        tf_to_pt_map[prefix + '''biases'''] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.''' )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F'''Loading TF weight {name} with shape {shape}''' )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''' )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('''Transposing''' )
            if len(pointer.shape ) == 2: # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
        logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + '''/RMSProp''' , None )
        tf_weights.pop(name + '''/RMSProp_1''' , None )
        tf_weights.pop(name + '''/ExponentialMovingAverage''' , None )
    logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
    return model
def apply_tf_padding( features: torch.Tensor , conv_layer: nn.Conv2d ) -> torch.Tensor:
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , '''constant''' , 0.0 )
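# Worked example of the TF "SAME" padding rule above: a 3x3 kernel with stride 2
# on a 7x7 input gives pad_along = max(3 - (7 % 2), 0) = 2 in each dimension,
# split as (left=1, right=1, top=1, bottom=1), so the output is ceil(7/2) = 4x4.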
class MobileNetVaConvLayer( nn.Module ):
    def __init__( self , config: MobileNetVaConfig , in_channels: int , out_channels: int , kernel_size: int , stride: Optional[int] = 1 , groups: Optional[int] = 1 , bias: bool = False , use_normalization: Optional[bool] = True , use_activation: Optional[bool or str] = True , ) -> None:
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='''zeros''' , )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward( self , features: torch.Tensor ) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel( PreTrainedModel ):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights( self , module: Union[nn.Linear, nn.Conv2d] ) -> None:
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel( MobileNetVaPreTrainedModel ):
    def __init__( self , config: MobileNetVaConfig , add_pooling_layer: bool = True ):
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
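    # With depth_multiplier=1.0 and the default min_depth, the loop above yields
    # the classic MobileNetV1 widths: stem 32, then pointwise outputs
    # 64, 128, 128, 256, 256, 512 (x6), 1024, 1024.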
    def _prune_heads( self , heads_to_prune ):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values: Optional[torch.Tensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _A , )
class MobileNetVaForImageClassification ( MobileNetVaPreTrainedModel ):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(__A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , ) | 33 | 0 |
| 33 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__a = logging.getLogger(__name__)
class NER( TokenClassificationTask ):
"""simple docstring"""
    def __init__(self, label_idx=-1) -> None:
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='''utf-8''') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('''-DOCSTART-''') or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(''' ''')
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace('''\n''', ''''''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('''O''')
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        example_id = 0
        for line in test_input_reader:
            if line.startswith('''-DOCSTART-''') or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ''' ''' + preds_list[example_id].pop(0) + '''\n'''
                writer.write(output_line)
            else:
                logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''', line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, '''r''') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
"""simple docstring"""
    def __init__(self) -> None:
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, '''r''') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
"""simple docstring"""
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='''utf-8''') as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['''form'''])
                    labels.append(token['''upos'''])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ''''''
            for token in sentence:
                out += f'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, '''r''') as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
] | 235 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral (Project Euler 28)."""
    total = 1  # the centre of the spiral
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1  # the top-right corner of ring i is odd**2
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even  # the four corners of ring i
    return total

if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number') | 235 | 1 |
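# Illustrative cross-check for the spiral-diagonal solution above, added as a
# sketch: it walks the four corners of every ring explicitly instead of using
# the 4*odd**2 - 6*even identity. `naive_solution` is a hypothetical name and
# is not part of the original script.
def naive_solution(n: int = 1001) -> int:
    total = 1  # the centre of the spiral
    corner = 1
    for side in range(2, n, 2):  # each ring adds 2 to the side length
        for _ in range(4):  # visit the four corners of the current ring
            corner += side
            total += corner
    return total

# the 5x5 spiral from the problem statement sums to 101 on its diagonals
assert naive_solution(5) == 101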
import torch
def main():
    '''simple docstring'''
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"""Successfully ran on {num_gpus} GPUs""")

if __name__ == "__main__":
    main()
| 283 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    iterations = ceil(precision / 14)  # each series term adds ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]

if __name__ == "__main__":
    n = 50
    print(F'''The first {n} digits of pi is: {pi(n)}''')
| 105 | 0 |
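# A quick sanity check for the Chudnovsky implementation above, assuming the
# module is importable and the function is named `pi` as in its __main__ block.
# With ~14 digits gained per series term, 50 digits need only 4 iterations.
assert pi(50).startswith("3.14159265358979")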
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10

def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Fallback linear search over array[left:right]."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1

def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted array."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1

def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted array."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(F"""Iterative search: {target} found at positions: {result1}""")
        print(F"""Recursive search: {target} found at positions: {result2}""")
    else:
        print('Not found')
| 90 |
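# Non-interactive usage sketch for the ternary-search module above, assuming its
# functions are importable under the names used in the __main__ block. With 15
# elements the first probe is ternary, and the search falls back to lin_search
# once the window shrinks below `precision`.
data = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29]
assert ite_ternary_search(data, 17) == 8
assert rec_ternary_search(0, len(data) - 1, data, 17) == 8
assert ite_ternary_search(data, 4) == -1  # absent values report -1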
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
@classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    state: DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin, ConfigMixin ):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
@property
    def has_state(self):
        return True
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None):
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None):
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1E-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1E-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True, ):
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ):
        return add_noise_common(state.common, original_samples, noise, timesteps)
    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ):
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__(self : List[str] ):
return self.config.num_train_timesteps
| 90 | 1 |
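# Numeric sketch of the DDPM update that `step` above implements (formulas (7)
# and (15) of https://arxiv.org/pdf/2006.11239.pdf), written with plain NumPy so
# it runs without JAX. The schedule mirrors the scheduler defaults; the array
# sizes are made up for illustration only.
import numpy as np

betas = np.linspace(0.0001, 0.02, 1000)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

t = 10
alpha_prod_t = alphas_cumprod[t]
alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev

sample = np.ones(4)              # current noisy sample x_t
model_output = 0.1 * np.ones(4)  # predicted noise (prediction_type="epsilon")

# "predicted x_0" (formula (15)), then the two mixing coefficients of formula (7)
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * betas[t]) / beta_prod_t
current_sample_coeff = alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
print(pred_prev_sample)  # mean of q(x_{t-1} | x_t, x_0) before noise is added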
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''')
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
A_ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 139 |
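# Toy demonstration of the key-renaming pattern used by the conversion script
# above: `create_rename_keys` computes (old, new) pairs from the original
# checkpoint layout and `rename_key` moves each tensor under its new name. The
# dict below is a made-up stand-in for a real SwiftFormer state_dict.
toy_state_dict = {"network.0.0.dwconv.weight": 0, "patch_embed.0.weight": 1}
for src, dest in create_rename_keys(toy_state_dict):
    rename_key(toy_state_dict, src, dest)
print(sorted(toy_state_dict))
# ['swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight',
#  'swiftformer.patch_embed.patch_embedding.0.weight']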
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 139 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
lowerCamelCase__ = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None) | 182 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
__lowerCAmelCase : List[str] = F"{src_lang}-{tgt_lang}"
__lowerCAmelCase : Tuple = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
__lowerCAmelCase : Any = os.path.join(_UpperCamelCase , 'README.md' )
print(F"Generating {path}" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 182 | 1 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` units with red blocks of minimum
    length 3, each pair separated by at least one black square (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # a block of `block_length` starting at `block_start`, plus one
                # separating square, leaves an independent sub-row to fill
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1  # the block flush against the right edge
    return ways_number[length]

if __name__ == "__main__":
    print(f"""{solution() = }""")
| 89 |
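# Worked small cases for the dynamic programme above (Project Euler 114). The
# problem statement gives exactly 17 arrangements for a row of length 7, which
# the recurrence reproduces.
assert solution(3) == 2   # all black, or a single 3-block
assert solution(7) == 17  # value quoted in the problem statement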
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
'''simple docstring'''
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self) -> Dict:
        return TaConfig.from_pretrained('''google/umt5-base''')
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Optional[int]:
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self) -> int:
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self) -> Tuple:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self) -> List[str]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
_a = UMTaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , )
_a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
_a = result.last_hidden_state
_a = result.past_key_values
_a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
# first forward pass
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_a , _a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = model(__UpperCAmelCase )['''last_hidden_state''']
_a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state''']
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -1, random_slice_idx].detach()
_a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
_a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval()
_a = model(**__UpperCAmelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() )
@require_torch
class UMTaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A_ : int = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A_ : str = True
A_ : List[str] = False
A_ : List[Any] = False
A_ : str = True
A_ : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A_ : Optional[Any] = [0.8, 0.9]
    def setUp(self) -> Tuple:
        self.model_tester = UMTaModelTester(self)
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
_a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_a = self.model_tester.prepare_config_and_inputs()
_a = config_and_inputs[0]
_a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval()
model.to(__UpperCAmelCase )
_a = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ):
_a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase )
_a = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _UpperCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase )
_a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase )
_a = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids
# fmt: off
_a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase )
_a = model.generate(input_ids.to(__UpperCAmelCase ) )
_a = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) | 320 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCAmelCase : Tuple = """src/diffusers"""
_lowerCAmelCase : Dict = """."""
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent(code: str) -> str:
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = '\n'.join(theoretical_code_lines)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = '\n'.join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowerCAmelCase : Tuple = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 353 |
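# Small illustration of the regexes defined above, assuming the module is
# importable: the `Copied from` marker is parsed into (indent, object path,
# replace pattern), which is exactly what `is_copy_consistent` consumes.
example = "    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.step with DDPMScheduler->MyScheduler"
print(_re_copy_warning.search(example).groups())
# ('    ', 'schedulers.scheduling_ddpm.DDPMScheduler.step', 'with DDPMScheduler->MyScheduler')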
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
ZERO2 = '''zero2'''
ZERO3 = '''zero3'''
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWavaVeca ( TestCasePlus ):
"""simple docstring"""
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fpaa_non_distributed(self, stage, model):
        '''simple docstring'''
        self.run_and_check(
            stage=stage, model=model, distributed=False, fpaa=True, )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fpaa_distributed(self, stage, model):
        '''simple docstring'''
        self.run_and_check(
            stage=stage, model=model, distributed=True, fpaa=True, )
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fpaa_non_distributed_fpaa(self, stage, model):
        '''simple docstring'''
        self.run_and_check(
            stage=stage, model=model, distributed=False, fpaa=False, )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fpaa_distributed_fpaa(self, stage, model):
        '''simple docstring'''
        self.run_and_check(
            stage=stage, model=model, distributed=True, fpaa=False, )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :int ):
'''simple docstring'''
pass
    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        fp16: bool = True,
        quality_checks: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        ".split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
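# Illustrative sketch (not part of the original test): on a 2-GPU machine the helper
# methods above compose a launch command roughly like the following; the paths and the
# chosen stage are assumptions for the example:
#
#   deepspeed --num_nodes 1 --num_gpus 2 \
#       <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json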
| 70 | 0 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
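# Minimal usage sketch (added for illustration; the checkpoint name is an assumption):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#   model = TFMT5Model.from_pretrained("google/mt5-small")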
| 30 |
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
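# Illustrative note (not part of the original tests): `e` and `d` are the number of
# encoder/decoder layers the student keeps; `d=None` copies the teacher's full decoder,
# as exercised by the asymmetric tests above. A typical call looks like:
#
#   student, *copied_layer_info = create_student_by_copying_alternating_layers(
#       TINY_BART, tempfile.mkdtemp(), e=1, d=None
#   )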
| 30 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
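# Minimal sampling-loop sketch (added for illustration; `model` stands in for any
# denoising network with a (sample, timestep) -> residual interface, which is an
# assumption of this example):
#
#   scheduler = IPNDMScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample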
| 117 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 117 | 1 |
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via the public JSON endpoint."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 33 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into raw bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
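# Quick round-trip sketch (added for illustration):
#
#   >>> base16_encode(b"Hello World!")
#   '48656C6C6F20576F726C6421'
#   >>> base16_decode("48656C6C6F20576F726C6421")
#   b'Hello World!'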
if __name__ == "__main__":
import doctest
    doctest.testmod()

| 312 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 371 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a TensorFlow 1.0 model checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
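    # Illustrative invocation of this subcommand (added for illustration; the file
    # paths below are placeholders, not values from the original module):
    #
    #   transformers-cli convert --model_type bert \
    #       --tf_checkpoint ./bert_model.ckpt \
    #       --config ./bert_config.json \
    #       --pytorch_dump_output ./pytorch_model.bin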
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
| 273 | 0 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
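# Quick sketch of what `padding_tensor` produces (added for illustration):
#
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   # -> [[1, 2, -1, -1], [3, -1, -1, -1]]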
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 4 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes F1 score and Exact Match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
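# Tiny worked example for `evaluate_multirc` (added for illustration):
#
#   ids_preds = [
#       {"idx": {"paragraph": 0, "question": 0, "answer": 0}, "prediction": 1},
#       {"idx": {"paragraph": 0, "question": 0, "answer": 1}, "prediction": 0},
#   ]
#   evaluate_multirc(ids_preds, [1, 0])
#   # -> {"exact_match": 1.0, "f1_m": 1.0, "f1_a": 1.0}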
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 254 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
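# Quick sketch (added for illustration): ids_tensor((2, 3), vocab_size=5) returns a
# 2x3 int32 array with entries drawn uniformly from [0, 4], e.g. [[1, 4, 0], [2, 2, 3]];
# random_attention_mask additionally forces the last column to 1.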
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 254 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
snake_case : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
snake_case : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Tuple = pipe(A , """anime turle""" , generator=A , output_type="""np""" )
snake_case : List[str] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(A , A )
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
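The helper used above, assert_mean_pixel_difference, comes from the shared diffusers test utilities. A minimal sketch of the check it performs, assuming the default tolerance of 10 (out of 255) per pixel on average:

import numpy as np

def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10):
    # Compare two images in [0, 255] space by their mean absolute pixel difference.
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average"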
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
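For reference, a typical invocation of this script could look like the following; the model name and file paths are illustrative placeholders, not values taken from the script itself.

# Hypothetical example invocation (paths and base model are placeholders):
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./hf_config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model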
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    double_digit_start = len(cc_number) - 2
    for i in range(double_digit_start, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling results in a two-digit number, i.e. greater than 9 (e.g. 6 x 2 = 12),
        # then add the digits of the product (e.g. 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
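A quick way to convince yourself of the doubling rule in luhn_validation: for a digit d with 2d > 9, we have 2d = 10 + (2d - 10), so the digit sum of 2d is 1 + (2d % 10), which is exactly what the modulo-then-increment branch computes. The sketch below verifies this for all ten digits:

for d in range(10):
    doubled = 2 * d
    reduced = doubled % 10 + 1 if doubled > 9 else doubled
    # reduced must equal the digit sum of the doubled value
    assert reduced == sum(int(c) for c in str(doubled))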
| 363 | """simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
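Outside of the test harness, the same inference path can be exercised directly. A minimal sketch, assuming the facebook/regnet-y-040 checkpoint (the first entry of the archive list):

from PIL import Image
import requests
import torch
from transformers import AutoImageProcessor, RegNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# map the top logit back to a human-readable class label
print(model.config.id2label[logits.argmax(-1).item()])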
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
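A minimal sketch of how this parser is typically used; the dataclass and its fields here are hypothetical, not part of the file above:

from dataclasses import dataclass, field

@dataclass
class TrainingArgs:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    batch_size: int = 32
    fp16: bool = False

parser = HfArgumentParser(TrainingArgs)
# Each dataclass yields one element of the returned tuple.
(training_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--fp16", "true"])
print(training_args.learning_rate, training_args.fp16)  # 0.0001 True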
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
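The 1-, 2-, 3- and 4-term blends of past noise estimates in step() are the classic Adams-Bashforth multistep coefficients. A quick way to sanity-check them, assuming the standard derivation by polynomial extrapolation, is that each coefficient row must sum to 1 (consistency of the rule):

rows = [
    [1.0],
    [3 / 2, -1 / 2],
    [23 / 12, -16 / 12, 5 / 12],
    [55 / 24, -59 / 24, 37 / 24, -9 / 24],
]
for row in rows:
    # each Adams-Bashforth coefficient row sums to 1
    assert abs(sum(row) - 1.0) < 1e-12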
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
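Stripped of the test scaffolding, the three-stage DiffEdit recipe exercised above looks like the following sketch; the prompts are placeholders and a CUDA device is assumed:

import torch
from diffusers import StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

# 1. generate_mask: contrast source vs. target prompt noise estimates to get an edit mask.
mask = pipe.generate_mask(image=init_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
# 2. invert: run the image back into partially-noised latents.
latents = pipe.invert(prompt="a bowl of fruit", image=init_image).latents
# 3. __call__: inpaint the masked region toward the target prompt from those latents.
edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents).images[0]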
import collections
import gzip
import os
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated

_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
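A minimal sketch of consuming this loader; the target directory is a placeholder:

# Hypothetical usage; "/tmp/mnist_data" is a placeholder cache directory.
data = read_data_sets("/tmp/mnist_data", one_hot=True)
images, labels = data.train.next_batch(64)
# reshape=True flattens 28x28x1 images to 784, one_hot=True gives 10-way labels
print(images.shape, labels.shape)  # (64, 784) (64, 10)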