"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ,):
"""simple docstring"""
UpperCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowercase ) )
] # the reference grid
UpperCamelCase = 1
UpperCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowercase ) )
] # the action grid
UpperCamelCase = init[0]
UpperCamelCase = init[1]
UpperCamelCase = 0
UpperCamelCase = g + heuristic[x][y] # cost from starting cell to destination cell
UpperCamelCase = [[f, g, x, y]]
UpperCamelCase = False # flag that is set when search is complete
UpperCamelCase = False # flag set if we can't find expand
while not found and not resign:
if len(_lowercase ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
UpperCamelCase = cell.pop()
UpperCamelCase = next_cell[2]
UpperCamelCase = next_cell[3]
UpperCamelCase = next_cell[1]
if x == goal[0] and y == goal[1]:
UpperCamelCase = True
else:
for i in range(len(_lowercase ) ): # to try out different valid actions
UpperCamelCase = x + DIRECTIONS[i][0]
UpperCamelCase = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowercase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
UpperCamelCase = g + cost
UpperCamelCase = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
UpperCamelCase = 1
UpperCamelCase = i
UpperCamelCase = []
UpperCamelCase = goal[0]
UpperCamelCase = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
UpperCamelCase = x - DIRECTIONS[action[x][y]][0]
UpperCamelCase = y - DIRECTIONS[action[x][y]][1]
UpperCamelCase = xa
UpperCamelCase = ya
invpath.append([x, y] )
UpperCamelCase = []
for i in range(len(_lowercase ) ):
path.append(invpath[len(_lowercase ) - 1 - i] )
return path, action
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
SCREAMING_SNAKE_CASE_ = [0, 0]
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE_ = [len(grid) - 1, len(grid[0]) - 1]
SCREAMING_SNAKE_CASE_ = 1
# the cost map which pushes the path closer to the goal
SCREAMING_SNAKE_CASE_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
SCREAMING_SNAKE_CASE_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
SCREAMING_SNAKE_CASE_ = 99
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i]) | 34 |
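
# A quick sanity check for `search` on a small obstacle-free grid. The 3x3 grid
# and its Manhattan-distance heuristic below are illustrative assumptions, not
# part of the original demo above.
#
# tiny_grid = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
# tiny_heuristic = [[abs(i - 2) + abs(j - 2) for j in range(3)] for i in range(3)]
# tiny_path, _ = search(tiny_grid, [0, 0], [2, 2], 1, tiny_heuristic)
# assert tiny_path[0] == [0, 0] and tiny_path[-1] == [2, 2]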
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
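
# A minimal invocation sketch equivalent to the CLI entry point above.
# The checkpoint and output paths are hypothetical placeholders.
#
# convert_bigbird_pegasus_ckpt_to_pytorch(
#     "/path/to/bigbird_pegasus_tf_ckpt", "/path/to/hf_save_dir", config_update={}
# )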
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''note_seq''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(self , ['''note_seq'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''note_seq'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(cls , ['''note_seq''']) | 34 |
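
# A self-contained sketch of the dummy-object pattern used above: instantiation
# (and, via the metaclass, attribute access) fails with an informative error
# when the optional backend is missing. Names here are illustrative, not part
# of diffusers.
#
# class _MissingBackendMeta(type):
#     def __getattr__(cls, name):
#         raise ImportError(f"{cls.__name__} requires the `note_seq` library to be installed.")
#
#
# class _ExampleDummy(metaclass=_MissingBackendMeta):
#     def __init__(self, *args, **kwargs):
#         raise ImportError(f"{type(self).__name__} requires the `note_seq` library to be installed.")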
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
UpperCamelCase = cha + cha
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
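
# Illustrative usage of the two functions above; the sample sentence is
# arbitrary lowercase ASCII text with spaces, which is what the alphabet in
# `calculate_prob` expects.
#
# calculate_prob("the quick brown fox jumps over the lazy dog")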
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = WavaVecaPhonemeCTCTokenizer
A_ = False
def UpperCAmelCase__ ( self) -> List[str]:
super().setUp()
UpperCamelCase = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''')
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(lowerCamelCase_) + '''\n''')
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=2_0 , lowerCamelCase_=5) -> Tuple[str, list]:
UpperCamelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_)) for i in range(len(lowerCamelCase_))]
UpperCamelCase = list(filter(lambda lowerCamelCase_: [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCamelCase_) , lowerCamelCase_))
if max_length is not None and len(lowerCamelCase_) > max_length:
UpperCamelCase = toks[:max_length]
if min_length is not None and len(lowerCamelCase_) < min_length and len(lowerCamelCase_) > 0:
while len(lowerCamelCase_) < min_length:
UpperCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
UpperCamelCase = [t[0] for t in toks]
# Ensure consistency
UpperCamelCase = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_)
if " " not in output_txt and len(lowerCamelCase_) > 1:
UpperCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_)
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_)
)
if with_prefix_space:
UpperCamelCase = ''' ''' + output_txt
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
return output_txt, output_ids
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Optional[int]:
kwargs.update(self.special_tokens_map)
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
# check adding a single token
tokenizer.add_tokens('''xxx''')
UpperCamelCase = tokenizer('''m xxx ɪ''' , do_phonemize=lowerCamelCase_).input_ids
self.assertEqual(lowerCamelCase_ , [1_3, 3_9_2, 1_7]) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''])
UpperCamelCase = tokenizer('''m aaa ɪ ccc''' , do_phonemize=lowerCamelCase_).input_ids
self.assertEqual(lowerCamelCase_ , [1_3, 3_9_3, 1_7, 3_9_5]) # aaa and ccc should be after xxx and 2 after aaa
UpperCamelCase = tokenizer('''maɪ c''' , do_phonemize=lowerCamelCase_).input_ids
self.assertEqual(lowerCamelCase_ , [3, 2_0_0]) # mai should be <unk> (=3)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCamelCase = '''Hello how are you'''
UpperCamelCase = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='''en-us''')
self.assertEqual(lowerCamelCase_ , '''h ə l oʊ h aʊ ɑːɹ j uː''')
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCamelCase = '''Hello how are you'''
UpperCamelCase = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='''en-us''')
self.assertEqual(tokenizer(lowerCamelCase_).input_ids , tokenizer(lowerCamelCase_ , do_phonemize=lowerCamelCase_).input_ids)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCamelCase = '''Hello how are you'''
UpperCamelCase = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='''en-us''')
UpperCamelCase = tokenizer.decode(tokenizer(lowerCamelCase_).input_ids)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCamelCase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
UpperCamelCase = tokenizer.decode(sample_ids[0])
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , batch_tokens[0])
self.assertEqual(lowerCamelCase_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
UpperCamelCase = '''Hello how are you'''
UpperCamelCase = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='''en-us''')
self.assertEqual(lowerCamelCase_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''')
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
UpperCamelCase = '''Hello how are you'''
UpperCamelCase = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='''en-us''')
self.assertEqual(tokenizer(lowerCamelCase_).input_ids , tokenizer(lowerCamelCase_ , do_phonemize=lowerCamelCase_).input_ids)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
# fmt: off
UpperCamelCase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
UpperCamelCase = tokenizer.decode(sample_ids[0])
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , batch_tokens[0])
self.assertEqual(lowerCamelCase_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''])
# decode with no word_del_token filter
UpperCamelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCamelCase_)
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_ , filter_word_delimiter_token=lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , batch_tokens[0])
self.assertEqual(lowerCamelCase_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''])
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
UpperCamelCase = '''Hello how are you'''
UpperCamelCase = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='''en-us''')
UpperCamelCase = tokenizer.decode(tokenizer(lowerCamelCase_).input_ids , filter_word_delimiter_token=lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
UpperCamelCase = '''Hello how are you'''
UpperCamelCase = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='''en-us''')
UpperCamelCase = tokenizer.decode(tokenizer(lowerCamelCase_).input_ids , filter_word_delimiter_token=lowerCamelCase_)
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''')]).strip() , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=lowerCamelCase_)
UpperCamelCase = '''Hello how are you'''
UpperCamelCase = tokenizer(lowerCamelCase_ , phonemizer_lang='''en-us''').input_ids
UpperCamelCase = tokenizer(lowerCamelCase_ , phonemizer_lang='''fr-fr''').input_ids
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokenizer.decode(lowerCamelCase_)
UpperCamelCase = tokenizer.decode(lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , '''h ə l oʊ h aʊ ɑːɹ j uː''')
self.assertEqual(lowerCamelCase_ , '''ɛ l o h aʊ a ʁ j u''')
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCamelCase = '''Hello how Are you'''
UpperCamelCase = '''hello how are you'''
UpperCamelCase = tokenizer(lowerCamelCase_).input_ids
UpperCamelCase = tokenizer(lowerCamelCase_).input_ids
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
tokenizer.add_tokens(['''!''', '''?'''])
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''})
# fmt: off
UpperCamelCase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''])
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_) -> List[str]:
UpperCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.get_tokenizer(word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
UpperCamelCase = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
UpperCamelCase = tokenizer.decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_ , filter_word_delimiter_token=lowerCamelCase_)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()) , 2)
self.assertTrue('''text''' in outputs)
self.assertTrue('''char_offsets''' in outputs)
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_))
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''')) , outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''') , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''])
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''') , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6])
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''') , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7])
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.get_tokenizer(word_delimiter_token='''|''')
def check_list_tuples_equal(lowerCamelCase_ , lowerCamelCase_):
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_))
self.assertTrue(isinstance(outputs_list[0] , lowerCamelCase_))
# transform list to ModelOutput
UpperCamelCase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]})
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''])
def recursive_check(lowerCamelCase_ , lowerCamelCase_):
if isinstance(lowerCamelCase_ , lowerCamelCase_):
[recursive_check(lowerCamelCase_ , lowerCamelCase_) for la, la in zip(lowerCamelCase_ , lowerCamelCase_)]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''])
# fmt: off
UpperCamelCase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_)
UpperCamelCase = [tokenizer.decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_) for ids in sample_ids]
check_list_tuples_equal(lowerCamelCase_ , lowerCamelCase_)
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''')
def UpperCAmelCase__ ( self) -> Tuple:
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''')
def UpperCAmelCase__ ( self) -> Dict:
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''')
def UpperCAmelCase__ ( self) -> int:
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''')
def UpperCAmelCase__ ( self) -> Tuple:
pass
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = self.get_tokenizers(do_lower_case=lowerCamelCase_)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(lowerCamelCase_)
self.assertNotEqual(lowerCamelCase_ , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCamelCase = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
UpperCamelCase = tokenizer.add_tokens(lowerCamelCase_)
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(lowerCamelCase_)
self.assertNotEqual(lowerCamelCase_ , 0)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , len(lowerCamelCase_))
self.assertEqual(lowerCamelCase_ , all_size + len(lowerCamelCase_))
UpperCamelCase = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=lowerCamelCase_)
self.assertGreaterEqual(len(lowerCamelCase_) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
UpperCamelCase = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
UpperCamelCase = tokenizer.add_special_tokens(lowerCamelCase_)
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(lowerCamelCase_)
self.assertNotEqual(lowerCamelCase_ , 0)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , len(lowerCamelCase_))
self.assertEqual(lowerCamelCase_ , all_size_a + len(lowerCamelCase_))
UpperCamelCase = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=lowerCamelCase_)
self.assertGreaterEqual(len(lowerCamelCase_) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''')
def UpperCAmelCase__ ( self) -> Tuple:
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''')
def UpperCAmelCase__ ( self) -> Optional[int]:
pass
def UpperCAmelCase__ ( self) -> Optional[Any]:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
UpperCamelCase = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
UpperCamelCase = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
UpperCamelCase = tokenizer.convert_tokens_to_string(lowerCamelCase_)
self.assertIsInstance(output['''text'''] , lowerCamelCase_) | 34 |
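
# Outside the test harness, the tokenizer exercised above is used roughly as
# follows. The checkpoint name is taken from the tests; phonemization requires
# the `phonemizer` package with an espeak backend installed.
#
# from transformers import Wav2Vec2PhonemeCTCTokenizer
# tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# ids = tokenizer("Hello how are you", phonemizer_lang="en-us").input_ids
# print(tokenizer.decode(ids))  # phoneme string such as "h ə l oʊ h aʊ ɑːɹ j uː"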
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
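
# A minimal inference sketch matching the integration test above; the
# checkpoint name comes from the test, while the input shape is illustrative.
#
# import numpy as np
# from transformers import FlaxDistilBertModel
# model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
# hidden = model(np.ones((1, 11), dtype="i4"))[0]
# print(hidden.shape)  # (1, 11, 768)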
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''deformable_detr'''
A_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCamelCase_=True , lowerCamelCase_=None , lowerCamelCase_=3 , lowerCamelCase_=3_0_0 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=6 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=8 , lowerCamelCase_=6 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=8 , lowerCamelCase_=0.0 , lowerCamelCase_=True , lowerCamelCase_="relu" , lowerCamelCase_=2_5_6 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , lowerCamelCase_=1.0 , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="sine" , lowerCamelCase_="resnet50" , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=4 , lowerCamelCase_=4 , lowerCamelCase_=4 , lowerCamelCase_=False , lowerCamelCase_=3_0_0 , lowerCamelCase_=False , lowerCamelCase_=1 , lowerCamelCase_=5 , lowerCamelCase_=2 , lowerCamelCase_=1 , lowerCamelCase_=1 , lowerCamelCase_=5 , lowerCamelCase_=2 , lowerCamelCase_=0.1 , lowerCamelCase_=0.25 , lowerCamelCase_=False , **lowerCamelCase_ , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = backbone_config.get('''model_type''')
UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase = config_class.from_dict(lowerCamelCase_)
UpperCamelCase = use_timm_backbone
UpperCamelCase = backbone_config
UpperCamelCase = num_channels
UpperCamelCase = num_queries
UpperCamelCase = max_position_embeddings
UpperCamelCase = d_model
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = init_xavier_std
UpperCamelCase = encoder_layerdrop
UpperCamelCase = auxiliary_loss
UpperCamelCase = position_embedding_type
UpperCamelCase = backbone
UpperCamelCase = use_pretrained_backbone
UpperCamelCase = dilation
# deformable attributes
UpperCamelCase = num_feature_levels
UpperCamelCase = encoder_n_points
UpperCamelCase = decoder_n_points
UpperCamelCase = two_stage
UpperCamelCase = two_stage_num_proposals
UpperCamelCase = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''')
# Hungarian matcher
UpperCamelCase = class_cost
UpperCamelCase = bbox_cost
UpperCamelCase = giou_cost
# Loss coefficients
UpperCamelCase = mask_loss_coefficient
UpperCamelCase = dice_loss_coefficient
UpperCamelCase = bbox_loss_coefficient
UpperCamelCase = giou_loss_coefficient
UpperCamelCase = eos_coefficient
UpperCamelCase = focal_alpha
UpperCamelCase = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> int:
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self) -> int:
return self.d_model
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
UpperCamelCase = self.backbone_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output | 34 |
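
# A short usage sketch of the config class above; the overridden value is
# illustrative.
#
# config = DeformableDetrConfig(num_queries=100)
# print(config.hidden_size, config.num_attention_heads)  # 256 8, served via the property aliases
# print(config.to_dict()["model_type"])  # "deformable_detr"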
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 | 1 |
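
# In user code this pipeline is normally constructed through `pipeline(...)`.
# A minimal sketch; the CLIP checkpoint and image URL are common public
# examples, not taken from this file.
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# preds = classifier(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["two cats", "a dog", "a plane"],
# )
# print(preds)  # list of {"score": ..., "label": ...}, sorted by score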
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_ = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE_ = {
'gpt-neox-20b': 2048,
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="<|endoftext|>" , lowerCamelCase_="<|endoftext|>" , lowerCamelCase_="<|endoftext|>" , lowerCamelCase_=False , **lowerCamelCase_ , ) -> List[str]:
super().__init__(
lowerCamelCase_ , lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , lowerCamelCase_) != add_prefix_space:
UpperCamelCase = getattr(lowerCamelCase_ , pre_tok_state.pop('''type'''))
UpperCamelCase = add_prefix_space
UpperCamelCase = pre_tok_class(**lowerCamelCase_)
UpperCamelCase = add_prefix_space
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
UpperCamelCase = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_)
return tuple(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[int]:
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_) + [self.eos_token_id])
if len(lowerCamelCase_) > self.model_max_length:
UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids | 34 |
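
# Minimal usage sketch; the checkpoint name comes from the pretrained map above.
#
# from transformers import GPTNeoXTokenizerFast
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# ids = tokenizer("hello world").input_ids
# print(tokenizer.decode(ids))  # "hello world"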
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 1 |
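
# A minimal end-to-end sketch mirroring the integration tests above; the
# checkpoint, image URLs, and prompt are all taken from the tests themselves.
#
# import torch
# from diffusers import StableDiffusionInpaintPipeline
# from diffusers.utils import load_image
# pipe = StableDiffusionInpaintPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
# ).to("cuda")
# init_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
# )
# mask_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
# )
# prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
# image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
# image.save("inpainted_cat.png")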
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''gptj'''
A_ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , lowerCamelCase_=5_0_4_0_0 , lowerCamelCase_=2_0_4_8 , lowerCamelCase_=4_0_9_6 , lowerCamelCase_=2_8 , lowerCamelCase_=1_6 , lowerCamelCase_=6_4 , lowerCamelCase_=None , lowerCamelCase_="gelu_new" , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=1e-5 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=5_0_2_5_6 , lowerCamelCase_=5_0_2_5_6 , lowerCamelCase_=False , **lowerCamelCase_ , ) -> Union[str, Any]:
UpperCamelCase = vocab_size
UpperCamelCase = n_positions
UpperCamelCase = n_embd
UpperCamelCase = n_layer
UpperCamelCase = n_head
UpperCamelCase = n_inner
UpperCamelCase = rotary_dim
UpperCamelCase = activation_function
UpperCamelCase = resid_pdrop
UpperCamelCase = embd_pdrop
UpperCamelCase = attn_pdrop
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = initializer_range
UpperCamelCase = use_cache
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
super().__init__(
bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , tie_word_embeddings=lowerCamelCase_ , **lowerCamelCase_)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ = "default" , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> int:
super().__init__(lowerCamelCase_ , task=lowerCamelCase_ , patching_specs=lowerCamelCase_ , use_past=lowerCamelCase_)
if not getattr(self._config , '''pad_token_id''' , lowerCamelCase_):
# TODO: how to do that better?
UpperCamelCase = 0
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''')
UpperCamelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def UpperCAmelCase__ ( self) -> int:
return self._config.n_layer
@property
def UpperCAmelCase__ ( self) -> int:
return self._config.n_head
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = -1 , lowerCamelCase_ = -1 , lowerCamelCase_ = False , lowerCamelCase_ = None , ) -> Mapping[str, Any]:
UpperCamelCase = super(lowerCamelCase_ , self).generate_dummy_inputs(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_)
# We need to order the input in the way they appears in the forward()
UpperCamelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
UpperCamelCase , UpperCamelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCamelCase = seqlen + 2
UpperCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCamelCase = [
(torch.zeros(lowerCamelCase_), torch.zeros(lowerCamelCase_)) for _ in range(self.num_layers)
]
UpperCamelCase = common_inputs['''attention_mask''']
if self.use_past:
UpperCamelCase = ordered_inputs['''attention_mask'''].dtype
UpperCamelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_)] , dim=1)
return ordered_inputs
@property
def UpperCAmelCase__ ( self) -> int:
return 1_3 | 34 |
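
# A short usage sketch for the two classes above; the small model sizes are
# illustrative, and the GPTJOnnxConfig import path assumes the module layout
# used here.
#
# from transformers import GPTJConfig
# from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig
# config = GPTJConfig(n_layer=4, n_embd=256, n_head=8)
# onnx_config = GPTJOnnxConfig(config)
# print(list(onnx_config.inputs.keys()))  # ["input_ids", "attention_mask"]
# print(onnx_config.default_onnx_opset)  # 13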
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
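# Hedged standalone sketch of the flag parsing above: RUN_SLOW-style switches
# are read from the environment as yes/no strings. Re-implemented inline here
# rather than importing from this module; the flag name below is illustrative.
def _parse_flag(key, default=False):
    value = os.environ.get(key)
    if value is None:
        return default
    if value.lower() not in ("yes", "true", "1", "no", "false", "0"):
        raise ValueError(f"If set, {key} must be yes or no.")
    return value.lower() in ("yes", "true", "1")

assert _parse_flag("SOME_FLAG_THAT_IS_NOT_SET", default=True) is True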
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.0: \'pip install \"soundfile>=0.12.0\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
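# Hedged standalone sketch of the patching trick above: replace
# requests.Session.send with a function that always raises, and let the
# context manager restore it afterwards. The URL below is a placeholder.
def _always_offline(self, request, **kwargs):
    raise requests.ConnectionError("Offline mode is enabled.", request=request)

with patch("requests.Session.send", _always_offline):
    try:
        requests.get("https://example.com", timeout=1)
    except requests.ConnectionError:
        pass  # every request now fails fast, which is the point of the simulation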
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta | 34 | 1 |
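# Hedged sketch of the per-worker port trick just above: derive a unique
# torch.distributed port from the pytest-xdist worker name (gw0, gw1, ...).
# Standalone re-implementation; 29500 mirrors the base port used above.
import os
import re

worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
port = 29500 + int(re.sub(r"^gw", "", worker))
assert port >= 29500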
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> None:
UpperCamelCase = data
UpperCamelCase = None
def __iter__( self) -> Optional[Any]:
UpperCamelCase = self
UpperCamelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCamelCase_)
yield node.data
UpperCamelCase = node.next_node
@property
def UpperCAmelCase__ ( self) -> bool:
try:
list(self)
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = Node(1)
SCREAMING_SNAKE_CASE_ = Node(2)
SCREAMING_SNAKE_CASE_ = Node(3)
SCREAMING_SNAKE_CASE_ = Node(4)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE_ = root_node.next_node
print(root_node.has_loop) # True
SCREAMING_SNAKE_CASE_ = Node(5)
SCREAMING_SNAKE_CASE_ = Node(6)
SCREAMING_SNAKE_CASE_ = Node(5)
SCREAMING_SNAKE_CASE_ = Node(6)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE_ = Node(1)
print(root_node.has_loop) # False | 34 |
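# Hedged reconstruction of the loop check above with readable names (the row's
# identifiers are machine-obfuscated, so this is an interpretation, not a copy):
class PlainNode:
    def __init__(self, data):
        self.data = data
        self.next_node = None

def has_loop(head):
    visited = []
    node = head
    while node:
        if node in visited:  # O(n^2) membership test, matching the approach above
            return True
        visited.append(node)
        node = node.next_node
    return False

a, b, c = PlainNode(1), PlainNode(2), PlainNode(3)
a.next_node, b.next_node = b, c
assert has_loop(a) is False
c.next_node = a  # close the cycle
assert has_loop(a) is True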
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 1 |
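# Hedged readable re-implementation of strand sort (ascending only). It
# satisfies the same asserts but is an illustration, not the row's code:
def strand_sort_plain(arr):
    result = []
    while arr:
        sublist = [arr.pop(0)]  # pull an increasing "strand" out of arr
        i = 0
        while i < len(arr):
            if arr[i] > sublist[-1]:
                sublist.append(arr.pop(i))
            else:
                i += 1
        merged = []  # merge the strand into the sorted result
        while result and sublist:
            merged.append(result.pop(0) if result[0] <= sublist[0] else sublist.pop(0))
        result = merged + result + sublist
    return result

assert strand_sort_plain([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]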
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCamelCase = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCamelCase_ , cache_dir=lowerCamelCase_)
UpperCamelCase = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_ , os.listdir(lowerCamelCase_)[0] , '''snapshots'''))]
UpperCamelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''') for f in files)
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 4
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.151_4745) < 1e-3
assert np.abs(np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 4_9947.875) < 5e-1
UpperCamelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(lowerCamelCase_) == num_samples
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0565_2401)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 238_3808.2)) < 5e-1
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0400_3906)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 237_3516.75)) < 5e-1
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0400_3906)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 237_3516.75)) < 5e-1
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , )
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , )
UpperCamelCase = scheduler.create_state()
UpperCamelCase = scheduler_state
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_4504_3945)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 234_7693.5)) < 5e-1
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = jax.random.split(jax.random.PRNGKey(0) , lowerCamelCase_)
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ , )
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
UpperCamelCase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ , use_memory_efficient_attention=lowerCamelCase_ , )
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        UpperCamelCase = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]  # slice the memory-efficient output, not the baseline images
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1e-2 | 34 |
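# Hedged sketch of the replicate/shard pattern the tests above rely on, run on
# toy arrays instead of a diffusion pipeline; shapes and sizes are assumptions.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
rng = jax.random.split(jax.random.PRNGKey(0), n)  # one PRNG key per device
batch = jnp.ones((n * 2, 8), dtype=jnp.int32)     # global batch, divisible by n
sharded = shard(batch)                            # -> (n, 2, 8): one slice per device
params = replicate({"w": jnp.ones((8,))})         # full parameter copy on each device
assert sharded.shape == (n, 2, 8)
assert jax.tree_util.tree_leaves(params)[0].shape == (n, 8)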
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 1 |
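# Hedged usage note: the metric above is a thin wrapper around scipy, so
# calling scipy directly reproduces the numbers in the docstring example.
from scipy.stats import pearsonr as scipy_pearsonr

r, p = scipy_pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
assert round(r, 2) == -0.74 and round(p, 2) == 0.15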
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = '''ylacombe/bark-small'''
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = '''en_speaker_1'''
UpperCamelCase = '''This is a test string'''
UpperCamelCase = '''speaker_embeddings_path.json'''
UpperCamelCase = '''speaker_embeddings'''
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> str:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Any:
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = BarkProcessor(tokenizer=lowerCamelCase_)
processor.save_pretrained(self.tmpdirname)
UpperCamelCase = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCamelCase = 3_5
UpperCamelCase = 2
UpperCamelCase = 8
UpperCamelCase = {
'''semantic_prompt''': np.ones(lowerCamelCase_),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len)),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
UpperCamelCase = processor(text=self.input_string , voice_preset=lowerCamelCase_)
UpperCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([])).tolist())
# test loading voice preset from npz file
UpperCamelCase = os.path.join(self.tmpdirname , '''file.npz''')
np.savez(lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = processor(text=self.input_string , voice_preset=lowerCamelCase_)
UpperCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([])).tolist())
# test loading voice preset from the hub
UpperCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = BarkProcessor(tokenizer=lowerCamelCase_)
UpperCamelCase = processor(text=self.input_string)
UpperCamelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_5_6 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist()) | 34 |
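# Hedged sketch of the voice-preset layout the tests above exercise: three
# named arrays round-tripped through .npz. Key names follow the test; the
# shapes are arbitrary placeholders.
import os
import tempfile
import numpy as np

preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **preset)
loaded = np.load(path)
assert set(loaded.files) == set(preset)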
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
        # `_convert_nargs_to_dict` should pair flags with values and infer their Python types.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 1 |
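# Hedged re-implementation of the nargs-to-dict idea under test above: pair up
# "--flag value" tokens and treat a flag followed by another flag as a bare
# boolean. Illustration only, not accelerate's actual parser (values are left
# as strings here, whereas the real helper also infers int/float/bool).
def nargs_to_dict(args):
    out, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            out[key] = args[i + 1]
            i += 2
        else:
            out[key] = True
            i += 1
    return out

assert nargs_to_dict(["--do_train", "--epochs", "3"]) == {"do_train": True, "epochs": "3"}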
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 34 |
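# Hedged sketch of the lazy-import pattern above: defer the real import until
# attribute access. A minimal stand-in for transformers' _LazyModule, using
# stdlib modules so it runs anywhere.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the module that defines it
        self._origin = {sym: mod for mod, syms in import_structure.items() for sym in syms}
    def __getattr__(self, attr):
        module = importlib.import_module(self._origin[attr])
        return getattr(module, attr)

lazy = TinyLazyModule("lazy", {"math": ["sqrt"], "json": ["dumps"]})
assert lazy.sqrt(9) == 3.0 and lazy.dumps([1]) == "[1]"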
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 | 1 |
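# Hedged standalone check: character error rate is character-level Levenshtein
# distance over total reference characters. This reproduces the docstring
# example above without depending on jiwer.
def char_edit_distance(ref, hyp):
    d = list(range(len(hyp) + 1))  # rolling DP row
    for i, rc in enumerate(ref, 1):
        prev, d[0] = d[0], i
        for j, hc in enumerate(hyp, 1):
            prev, d[j] = d[j], min(d[j] + 1, d[j - 1] + 1, prev + (rc != hc))
    return d[len(hyp)]

refs = ["this is the reference", "there is another one"]
hyps = ["this is the prediction", "there is an other sample"]
score = sum(char_edit_distance(r, h) for r, h in zip(refs, hyps)) / sum(len(r) for r in refs)
assert abs(score - 0.34146341463414637) < 1e-12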
"""simple docstring"""
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 0
for i in range(1 ,1001 ):
total += i**i
return str(_lowercase )[-10:]
if __name__ == "__main__":
print(solution()) | 34 |
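# Hedged note: the brute-force sum above is fine at n = 1000, but the last ten
# digits can also be accumulated with modular exponentiation, which keeps every
# intermediate value below 10**10:
MOD = 10**10
last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(last_ten).zfill(10))  # same ten digits as the function above returns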
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 1 |
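# Hedged sketch of the XLNet-style special-token layout built above: pairs are
# laid out as A <sep> B <sep> <cls>, with token type ids 0 for A, 1 for B and
# 2 for <cls>. The ids 4 and 3 below are placeholders, not real vocab ids.
def build_inputs(ids_a, ids_b=None, sep=4, cls=3):
    if ids_b is None:
        return ids_a + [sep, cls]
    return ids_a + [sep] + ids_b + [sep, cls]

def token_type_ids(ids_a, ids_b=None):
    if ids_b is None:
        return [0] * (len(ids_a) + 1) + [2]
    return [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 1) + [2]

assert build_inputs([10, 11], [20]) == [10, 11, 4, 20, 4, 3]
assert token_type_ids([10, 11], [20]) == [0, 0, 0, 1, 1, 2]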
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
SCREAMING_SNAKE_CASE_ = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = None
A_ = None
A_ = None
A_ = None
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase , UpperCamelCase , UpperCamelCase = _str_to_version_tuple(self.version_str)
def __repr__( self) -> List[Any]:
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.major, self.minor, self.patch
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
if isinstance(lowerCamelCase_ , lowerCamelCase_):
return Version(lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
return other
raise TypeError(F'{other} (type {type(lowerCamelCase_)}) cannot be compared to version.')
def __eq__( self , lowerCamelCase_) -> Optional[int]:
try:
UpperCamelCase = self._validate_operand(lowerCamelCase_)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , lowerCamelCase_) -> Tuple:
UpperCamelCase = self._validate_operand(lowerCamelCase_)
return self.tuple < other.tuple
def __hash__( self) -> str:
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def UpperCAmelCase__ ( self) -> str:
return self.version_str
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = _VERSION_REG.match(_lowercase )
if not res:
raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(_lowercase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def __snake_case ( _lowercase ):
"""simple docstring"""
return ".".join(str(_lowercase ) for v in version_tuple ) | 34 |
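# Hedged standalone sketch of the version helpers above, inlined so it runs
# without the dataclass: parse "x.y.z" into an int tuple and compare.
import re

_VERSION_RE = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def to_version_tuple(version_str):
    match = _VERSION_RE.match(version_str)
    if not match:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with x, y, z being digits.")
    return tuple(int(match.group(g)) for g in ("major", "minor", "patch"))

assert to_version_tuple("1.0.2") == (1, 0, 2)
assert to_version_tuple("1.2.0") < to_version_tuple("1.10.0")  # numeric, not lexicographic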
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 1 |
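# Hedged re-implementation of the greedy longest-match WordPiece loop above,
# with readable names and a toy vocabulary (the real tokenizer loads vocab.txt):
def greedy_wordpiece(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1  # shrink until some prefix matches
        if end == start:
            tokens.append(unk)  # nothing matched: emit the unknown token, skip one char
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

assert greedy_wordpiece("unhappy", {"un", "happy", "hap"}) == ["un", "happy"]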
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 1 |
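To make the parameter mapping above concrete, here is a hedged sketch of how raw network outputs can be turned into a strictly positive scale and a torch `Normal`, mirroring the squareplus-then-clamp step in the class (names such as `raw_loc` and `raw_scale` are illustrative):

import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    # smooth positive mapping: (x + sqrt(x**2 + 4)) / 2, as in the static method above
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

raw_loc = torch.randn(8, 1)
raw_scale = torch.randn(8, 1)
scale = squareplus(raw_scale).clamp_min(torch.finfo(raw_scale.dtype).eps)
dist = torch.distributions.Normal(raw_loc.squeeze(-1), scale.squeeze(-1))
print(dist.mean.shape, bool((dist.stddev > 0).all()))  # torch.Size([8]) True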
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = IFInpaintingSuperResolutionPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
A_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCAmelCase__ ( self) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> List[Any]:
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase__ ( self) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def UpperCAmelCase__ ( self) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
def UpperCAmelCase__ ( self) -> Union[str, Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def UpperCAmelCase__ ( self) -> List[str]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def UpperCAmelCase__ ( self) -> int:
self._test_save_load_local()
def UpperCAmelCase__ ( self) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , ) | 34 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
super().__init__(*lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = super().add_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_)
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
''' `placeholder_token` that is not already in the tokenizer.''')
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_ , lowerCamelCase_=1 , **lowerCamelCase_) -> int:
UpperCamelCase = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_)
output.append(lowerCamelCase_)
else:
UpperCamelCase = []
for i in range(lowerCamelCase_):
UpperCamelCase = placeholder_token + F'_{i}'
self.try_adding_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_)
output.append(lowerCamelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
F' {placeholder_token}; keep placeholder tokens independent')
UpperCamelCase = output
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=1.0) -> Any:
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = []
for i in range(len(lowerCamelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowerCamelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
UpperCamelCase = self.token_map[placeholder_token]
UpperCamelCase = tokens[: 1 + int(len(lowerCamelCase_) * prop_tokens_to_load)]
if vector_shuffle:
UpperCamelCase = copy.copy(lowerCamelCase_)
random.shuffle(lowerCamelCase_)
UpperCamelCase = text.replace(lowerCamelCase_ , ''' '''.join(lowerCamelCase_))
return text
def __call__( self , lowerCamelCase_ , *lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=1.0 , **lowerCamelCase_) -> Tuple:
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowerCamelCase_ , vector_shuffle=lowerCamelCase_ , prop_tokens_to_load=lowerCamelCase_) , *lowerCamelCase_ , **lowerCamelCase_ , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=1.0 , **lowerCamelCase_) -> List[Any]:
return super().encode(
self.replace_placeholder_tokens_in_text(
lowerCamelCase_ , vector_shuffle=lowerCamelCase_ , prop_tokens_to_load=lowerCamelCase_) , *lowerCamelCase_ , **lowerCamelCase_ , ) | 34 |
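A stand-alone sketch of the placeholder expansion this tokenizer performs, assuming a token map shaped like the one the class maintains (`token_map` and `expand` are illustrative names):

import random

token_map = {"<cat>": ["<cat>_0", "<cat>_1", "<cat>_2"]}

def expand(text, vector_shuffle=False, prop_tokens_to_load=1.0):
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
            if vector_shuffle:
                tokens = tokens.copy()
                random.shuffle(tokens)
            text = text.replace(placeholder, " ".join(tokens))
    return text

print(expand("a photo of <cat>"))  # a photo of <cat>_0 <cat>_1 <cat>_2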
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 34 |
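The `_LazyModule` above defers the heavy imports until first attribute access. A minimal sketch of the same idea using PEP 562's module-level `__getattr__` (illustrative only; the real implementation does considerably more):

import importlib

_LAZY_ATTRS = {"OrderedDict": ("collections", "OrderedDict")}  # illustrative mapping

def __getattr__(name):
    # PEP 562: called only for attributes not found in the module namespace
    if name in _LAZY_ATTRS:
        module_name, attr = _LAZY_ATTRS[name]
        return getattr(importlib.import_module(module_name), attr)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")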
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 1 |
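For a quick sanity check of the Z-algorithm above, here is a compact reference version with transparent names and a small worked example (z[i] is the length of the longest common prefix of s and s[i:]):

def z_function(s):
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

assert z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1]
# occurrences of "aba" in "abacaba": positions where z >= len(pattern)
print(sum(v >= 3 for v in z_function("aba" + "abacaba")))  # 2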
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
SCREAMING_SNAKE_CASE_ = False
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_)
UpperCamelCase = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = generator.manual_seed(0)
UpperCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = '''cyberpunk 2077'''
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
UpperCamelCase = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
UpperCamelCase = '''A painting of a squirrel eating a burger '''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''').images
UpperCamelCase = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
UpperCamelCase = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''').images
UpperCamelCase = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 | 34 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 1 |
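A dependency-free sketch of the module-swapping traversal that `_replace_with_bnb_linear` performs, with a stand-in class in place of the bitsandbytes layers (`StubQuantLinear` and `replace_linears` are purely illustrative):

import torch.nn as nn

class StubQuantLinear(nn.Linear):
    # stand-in for a quantized layer such as bnb.nn.Linear8bitLt in this sketch
    pass

def replace_linears(model, skip=("lm_head",)):
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            new_module = StubQuantLinear(
                module.in_features, module.out_features, bias=module.bias is not None
            )
            new_module.requires_grad_(False)  # mirrors the requires_grad_ call above
            model._modules[name] = new_module
        elif len(list(module.children())) > 0:
            replace_linears(module, skip)  # recurse into submodules
    return model

print(replace_linears(nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))))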
"""simple docstring"""
from math import factorial
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = real
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [1] * rank
else:
UpperCamelCase = rank
def __repr__( self) -> Any:
return (
F'{self.real}+'
F'{"+".join(str(lowerCamelCase_)+"E"+str(n+1)for n,dual in enumerate(self.duals))}'
)
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1)
return Dual(self.real , lowerCamelCase_)
def __add__( self , lowerCamelCase_) -> List[str]:
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
return Dual(self.real + other , self.duals)
UpperCamelCase = self.duals.copy()
UpperCamelCase = other.duals.copy()
if len(lowerCamelCase_) > len(lowerCamelCase_):
o_dual.extend([1] * (len(lowerCamelCase_) - len(lowerCamelCase_)))
elif len(lowerCamelCase_) < len(lowerCamelCase_):
s_dual.extend([1] * (len(lowerCamelCase_) - len(lowerCamelCase_)))
UpperCamelCase = []
for i in range(len(lowerCamelCase_)):
new_duals.append(s_dual[i] + o_dual[i])
return Dual(self.real + other.real , lowerCamelCase_)
A_ = __add__
def __sub__( self , lowerCamelCase_) -> str:
return self + other * -1
def __mul__( self , lowerCamelCase_) -> Union[str, Any]:
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = []
for i in self.duals:
new_duals.append(i * other)
return Dual(self.real * other , lowerCamelCase_)
UpperCamelCase = [0] * (len(self.duals) + len(other.duals) + 1)
for i, item in enumerate(self.duals):
for j, jtem in enumerate(other.duals):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals)):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals)):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowerCamelCase_)
A_ = __mul__
def __truediv__( self , lowerCamelCase_) -> List[str]:
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = []
for i in self.duals:
new_duals.append(i / other)
return Dual(self.real / other , lowerCamelCase_)
raise ValueError
def __floordiv__( self , lowerCamelCase_) -> Optional[Any]:
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = []
for i in self.duals:
new_duals.append(i // other)
return Dual(self.real // other , lowerCamelCase_)
raise ValueError
def __pow__( self , lowerCamelCase_) -> str:
if n < 0 or isinstance(n , float):
raise ValueError('''power must be a positive integer''')
if n == 0:
return 1
if n == 1:
return self
UpperCamelCase = self
for _ in range(n - 1):
x *= self
return x
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
if not callable(_lowercase ):
raise ValueError('''differentiate() requires a function as input for func''' )
if not isinstance(_lowercase ,(float, int) ):
raise ValueError('''differentiate() requires a float as input for position''' )
if not isinstance(_lowercase ,_lowercase ):
raise ValueError('''differentiate() requires an int as input for order''' )
UpperCamelCase = Dual(_lowercase ,1 )
UpperCamelCase = func(_lowercase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def __snake_case ( _lowercase ):
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2)) | 34 |
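A self-contained first-order version of the forward-mode rule the dual-number class implements: carry (value, derivative) pairs and apply the product rule (`D` is an illustrative name, not the class above):

class D:
    def __init__(self, v, d=0.0):
        self.v, self.d = v, d

    def __mul__(self, other):
        other = other if isinstance(other, D) else D(other)
        # product rule: (fg)' = f'g + fg'
        return D(self.v * other.v, self.d * other.v + self.v * other.d)

    def __pow__(self, n):
        out = D(1.0)
        for _ in range(n):
            out = out * self
        return out

x = D(3.0, 1.0)       # seed dx/dx = 1
y = x ** 2 * x ** 4   # y = x**6
print(y.v, y.d)       # 729.0 1458.0, i.e. f(3) = 3**6 and f'(3) = 6 * 3**5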
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z) | 34 | 1 |
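For reference, a compact deterministic variant of the counting quicksort above (Lomuto partition with the last element as pivot; the names are illustrative):

def quick_sort(a, lo, hi, count=0):
    if lo >= hi:
        return count
    pivot, i = a[hi], lo - 1
    for j in range(lo, hi):
        count += 1                      # one comparison per element examined
        if a[j] < pivot:
            i += 1
            a[i], a[j] = a[j], a[i]
    a[i + 1], a[hi] = a[hi], a[i + 1]   # place the pivot
    count = quick_sort(a, lo, i, count)
    return quick_sort(a, i + 2, hi, count)

data = [5, 2, 9, 1, 5, 6]
comparisons = quick_sort(data, 0, len(data) - 1)
print(data, comparisons)  # [1, 2, 5, 5, 6, 9] plus the comparison count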
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> int:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase_ , model.state_dict())
UpperCamelCase = os.path.join(lowerCamelCase_ , '''index.json''')
self.assertTrue(os.path.isfile(lowerCamelCase_))
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCamelCase = os.path.join(lowerCamelCase_ , F'{key}.dat')
self.assertTrue(os.path.isfile(lowerCamelCase_))
# TODO: add tests on the fact weights are properly loaded
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
UpperCamelCase = torch.randn(2 , 3 , dtype=lowerCamelCase_)
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = offload_weight(lowerCamelCase_ , '''weight''' , lowerCamelCase_ , {})
UpperCamelCase = os.path.join(lowerCamelCase_ , '''weight.dat''')
self.assertTrue(os.path.isfile(lowerCamelCase_))
self.assertDictEqual(lowerCamelCase_ , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(lowerCamelCase_).split('''.''')[1]}})
UpperCamelCase = load_offloaded_weight(lowerCamelCase_ , index['''weight'''])
self.assertTrue(torch.equal(lowerCamelCase_ , lowerCamelCase_))
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = ModelForTest()
UpperCamelCase = model.state_dict()
UpperCamelCase = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCamelCase_ , save_folder=lowerCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase_ , weight_map[key]))
UpperCamelCase = {k: v for k, v in state_dict.items() if '''weight''' in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCamelCase_ , save_folder=lowerCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase_ , weight_map[key]))
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase_ , lowerCamelCase_)
# Duplicates are removed
UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCamelCase_ , save_folder=lowerCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase_ , weight_map[key]))
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
UpperCamelCase = extract_submodules_state_dict(lowerCamelCase_ , ['''a.1''', '''a.2'''])
self.assertDictEqual(lowerCamelCase_ , {'''a.1''': 0, '''a.2''': 2})
UpperCamelCase = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
UpperCamelCase = extract_submodules_state_dict(lowerCamelCase_ , ['''a.1''', '''a.2'''])
self.assertDictEqual(lowerCamelCase_ , {'''a.1.a''': 0, '''a.2.a''': 2}) | 34 |
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
import sys
SCREAMING_SNAKE_CASE_ = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = 1
for digit in s:
product *= int(_lowercase )
return product
def __snake_case ( _lowercase = N ):
"""simple docstring"""
UpperCamelCase = -sys.maxsize - 1
UpperCamelCase = n[:13]
UpperCamelCase = 13
while cur_index < len(_lowercase ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
UpperCamelCase = substr[1:] + n[cur_index]
cur_index += 1
else:
UpperCamelCase = max(_lowercase ,str_eval(_lowercase ) )
UpperCamelCase = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'{solution() = }') | 34 |
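The sliding-window solution skips ahead whenever a window would start with a smaller digit; a brute-force cross-check over every window is short enough to verify it on a small input:

from functools import reduce

def brute_force(digits, k=13):
    return max(
        reduce(lambda acc, d: acc * int(d), digits[i : i + k], 1)
        for i in range(len(digits) - k + 1)
    )

print(brute_force("3675356291", k=3))  # 210, from the window "675"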
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 1 |
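The key step in the conversion is splitting each fused qkv projection into separate q, k, v tensors by slicing along the output dimension; a tiny demonstration of that slicing:

import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), qkv_weight)
print(q.shape, k.shape, v.shape)  # three [4, 4] blocks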
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = tuple[int, int, int]
SCREAMING_SNAKE_CASE_ = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE_ = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
SCREAMING_SNAKE_CASE_ = 'FOBHMDKEXQNRAULPGSJVTYICZW'
SCREAMING_SNAKE_CASE_ = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
SCREAMING_SNAKE_CASE_ = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE_ = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
SCREAMING_SNAKE_CASE_ = 'SGLCPQWZHKXAREONTFBVIYJUDM'
SCREAMING_SNAKE_CASE_ = 'HVSICLTYKQUBXDWAJZOMFGPREN'
SCREAMING_SNAKE_CASE_ = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
SCREAMING_SNAKE_CASE_ = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
SCREAMING_SNAKE_CASE_ = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
if (unique_rotsel := len(set(_lowercase ) )) < 3:
UpperCamelCase = f'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(_lowercase )
# Checks if rotor positions are valid
UpperCamelCase , UpperCamelCase , UpperCamelCase = rotpos
if not 0 < rotorposa <= len(_lowercase ):
UpperCamelCase = f'First rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
UpperCamelCase = f'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
UpperCamelCase = f'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(_lowercase )
# Validates string and returns dict
UpperCamelCase = _plugboard(_lowercase )
return rotpos, rotsel, pbdict
def __snake_case ( _lowercase ):
"""simple docstring"""
if not isinstance(_lowercase ,_lowercase ):
UpperCamelCase = f'Plugboard setting isn\'t type string ({type(_lowercase )})'
raise TypeError(_lowercase )
elif len(_lowercase ) % 2 != 0:
UpperCamelCase = f'Odd number of symbols ({len(_lowercase )})'
raise Exception(_lowercase )
elif pbstring == "":
return {}
pbstring = pbstring.replace(''' ''' ,'''''')
# Checks if all characters are unique
UpperCamelCase = set()
for i in pbstring:
if i not in abc:
UpperCamelCase = f'\'{i}\' not in list of symbols'
raise Exception(_lowercase )
elif i in tmppbl:
UpperCamelCase = f'Duplicate symbol ({i})'
raise Exception(_lowercase )
else:
tmppbl.add(_lowercase )
del tmppbl
# Created the dictionary
UpperCamelCase = {}
for j in range(0 ,len(_lowercase ) - 1 ,2 ):
UpperCamelCase = pbstring[j + 1]
UpperCamelCase = pbstring[j]
return pb
def __snake_case ( _lowercase ,_lowercase ,_lowercase = (rotora, rotora, rotora) ,_lowercase = "" ,):
"""simple docstring"""
UpperCamelCase = text.upper()
UpperCamelCase , UpperCamelCase , UpperCamelCase = _validator(
_lowercase ,_lowercase ,plugb.upper() )
UpperCamelCase , UpperCamelCase , UpperCamelCase = rotor_position
UpperCamelCase , UpperCamelCase , UpperCamelCase = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
UpperCamelCase = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
UpperCamelCase = plugboard[symbol]
# rotor ra --------------------------
UpperCamelCase = abc.index(_lowercase ) + rotorposa
UpperCamelCase = rotora[index % len(_lowercase )]
# rotor rb --------------------------
UpperCamelCase = abc.index(_lowercase ) + rotorposa
UpperCamelCase = rotora[index % len(_lowercase )]
# rotor rc --------------------------
UpperCamelCase = abc.index(_lowercase ) + rotorposa
UpperCamelCase = rotora[index % len(_lowercase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
UpperCamelCase = reflector[symbol]
# 2nd rotors
UpperCamelCase = abc[rotora.index(_lowercase ) - rotorposa]
UpperCamelCase = abc[rotora.index(_lowercase ) - rotorposa]
UpperCamelCase = abc[rotora.index(_lowercase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
UpperCamelCase = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowercase ):
UpperCamelCase = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
UpperCamelCase = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
UpperCamelCase = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowercase )
return "".join(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 'This is my Python script that emulates the Enigma machine from WWII.'
SCREAMING_SNAKE_CASE_ = (1, 1, 1)
SCREAMING_SNAKE_CASE_ = 'pictures'
SCREAMING_SNAKE_CASE_ = (rotora, rotora, rotora)
SCREAMING_SNAKE_CASE_ = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb)) | 34 |
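A toy illustration of why running the same machine over its own output restores the text: the reflector is a self-inverse pairing with no fixed points, and each rotor is undone on the way back, so with the rotor stepping held fixed, encrypting twice is the identity (one rotor shown; names are illustrative):

ABC = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
ROTOR = "EGZWVONAHDCLFQMSIPJBYUKXTR"
REFLECT = {a: b for a, b in zip(ABC, ABC[13:] + ABC[:13])}  # A<->N, B<->O, ...

def crypt(c):
    c = ROTOR[ABC.index(c)]      # forward through the rotor
    c = REFLECT[c]               # self-inverse reflector
    return ABC[ROTOR.index(c)]   # back through the rotor

msg = "HELLO"
enc = "".join(crypt(c) for c in msg)
assert "".join(crypt(c) for c in enc) == msg
print(enc)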
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 1 |
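A dependency-free sketch of the retry loop that `find_executable_batch_size` implements: halve the batch size on an out-of-memory error until the wrapped function succeeds (illustrative; the real accelerate decorator inspects exceptions and CUDA state more carefully):

import functools

def find_executable_batch_size(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" not in str(e):
                        raise
                    batch_size //= 2  # halve and retry
        return wrapper
    return decorator

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 8, after trying 128, 64, 32, 16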
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = '''imagenet-1k-id2label.json'''
UpperCamelCase = 1000
UpperCamelCase = '''huggingface/label-files'''
UpperCamelCase = num_labels
UpperCamelCase = json.load(open(cached_download(hf_hub_url(_lowercase ,_lowercase ,repo_type='''dataset''' ) ) ,'''r''' ) )
UpperCamelCase = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = UpperCamelCase = CvtConfig(num_labels=_lowercase ,idalabel=_lowercase ,labelaid=_lowercase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' ,1 )[-1][4:6] == "13":
UpperCamelCase = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' ,1 )[-1][4:6] == "21":
UpperCamelCase = [1, 4, 16]
# For wide CvT (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
UpperCamelCase = [2, 2, 20]
UpperCamelCase = [3, 12, 16]
UpperCamelCase = [192, 768, 1024]
UpperCamelCase = CvtForImageClassification(_lowercase )
UpperCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
UpperCamelCase = image_size
UpperCamelCase = torch.load(_lowercase ,map_location=torch.device('''cpu''' ) )
UpperCamelCase = OrderedDict()
UpperCamelCase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCamelCase = list_of_state_dict + cls_token(_lowercase )
UpperCamelCase = list_of_state_dict + embeddings(_lowercase )
for cnt in range(config.depth[idx] ):
UpperCamelCase = list_of_state_dict + attention(_lowercase ,_lowercase )
UpperCamelCase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_lowercase )
for i in range(len(_lowercase ) ):
UpperCamelCase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Path to the original CvT checkpoint file (.pth).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path) | 34 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
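# DummyDataset simply returns each index as its own sample, so distributed
# evaluation can be checked for completeness and ordering.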
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''camembert'''
def __init__( self , lowerCamelCase_=3_0_5_2_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=2 , lowerCamelCase_="absolute" , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> str:
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
]) | 34 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
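# Apply the pattern lists above to rename TF variable names, transpose dense/attention
# kernels, and load everything into the PyTorch BigBirdPegasus model.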
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
"""simple docstring"""
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
assert x is not None
assert y is not None
UpperCamelCase = len(_lowercase )
UpperCamelCase = len(_lowercase )
# declaring the array for storing the dp values
UpperCamelCase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 ,m + 1 ):
for j in range(1 ,n + 1 ):
UpperCamelCase = 1 if x[i - 1] == y[j - 1] else 0
UpperCamelCase = max(l[i - 1][j] ,l[i][j - 1] ,l[i - 1][j - 1] + match )
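# walk back from l[m][n] to reconstruct one longest common subsequence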
UpperCamelCase = ''''''
UpperCamelCase , UpperCamelCase = m, n
while i > 0 and j > 0:
UpperCamelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
UpperCamelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 'AGGTAB'
SCREAMING_SNAKE_CASE_ = 'GXTXAYB'
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 'GTAB'
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod() | 34 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# total character count, used to normalize counts into probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each character in the alphabet, add its entropy contribution if it occurs in the text.
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each two-character sequence, add its entropy contribution.
for cha in my_alphas:
for chb in my_alphas:
UpperCamelCase = cha + chb
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
SCREAMING_SNAKE_CASE_ = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
SCREAMING_SNAKE_CASE_ = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence'''),
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
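# download (and cache) the requested COMET checkpoint; "wmt20-comet-da" is the default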
if self.config_name == "default":
UpperCamelCase = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
else:
UpperCamelCase = comet.load_from_checkpoint(comet.download_model(self.config_name))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False) -> Optional[int]:
if gpus is None:
UpperCamelCase = 1 if torch.cuda.is_available() else 0
UpperCamelCase = {'''src''': sources, '''mt''': predictions, '''ref''': references}
UpperCamelCase = [dict(zip(lowerCamelCase_ , lowerCamelCase_)) for t in zip(*data.values())]
UpperCamelCase , UpperCamelCase = self.scorer.predict(lowerCamelCase_ , gpus=lowerCamelCase_ , progress_bar=lowerCamelCase_)
return {"mean_score": mean_score, "scores": scores} | 34 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
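# Config values below are deliberately tiny so these Flax tests run quickly on CPU.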
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_ = {'facebook/bart-base': BartForConditionalGeneration}
SCREAMING_SNAKE_CASE_ = {'facebook/bart-base': BartTokenizer}
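# Flow: load the HF model, wrap beam search in TorchScript, export to ONNX,
# then check the ONNX Runtime output against the PyTorch output.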
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' ,type=_lowercase ,default=_lowercase ,help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' ,type=_lowercase ,default=5 ,help='''The maximum total input sequence length after tokenization.''' ,)
parser.add_argument(
'''--num_beams''' ,type=_lowercase ,default=_lowercase ,help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) ,)
parser.add_argument(
'''--model_name_or_path''' ,type=_lowercase ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=_lowercase ,)
parser.add_argument(
'''--config_name''' ,type=_lowercase ,default=_lowercase ,help='''Pretrained config name or path if not the same as model_name''' ,)
parser.add_argument(
'''--device''' ,type=_lowercase ,default='''cpu''' ,help='''Device where the model will be run''' ,)
parser.add_argument('''--output_file_path''' ,type=_lowercase ,default=_lowercase ,help='''Where to store the final ONNX file.''' )
UpperCamelCase = parser.parse_args()
return args
def __snake_case ( _lowercase ,_lowercase="cpu" ):
"""simple docstring"""
UpperCamelCase = model_dict[model_name].from_pretrained(_lowercase ).to(_lowercase )
UpperCamelCase = tokenizer_dict[model_name].from_pretrained(_lowercase )
if model_name in ["facebook/bart-base"]:
UpperCamelCase = 0
UpperCamelCase = None
UpperCamelCase = 0
return huggingface_model, tokenizer
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
model.eval()
UpperCamelCase = None
UpperCamelCase = torch.jit.script(BARTBeamSearchGenerator(_lowercase ) )
with torch.no_grad():
UpperCamelCase = '''My friends are cool but they eat too many carbs.'''
UpperCamelCase = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors='''pt''' ).to(model.device )
UpperCamelCase = model.generate(
inputs['''input_ids'''] ,attention_mask=inputs['''attention_mask'''] ,num_beams=_lowercase ,max_length=_lowercase ,early_stopping=_lowercase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowercase ,(
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowercase ,opset_version=14 ,input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] ,output_names=['''output_ids'''] ,dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} ,example_outputs=_lowercase ,)
logger.info('''Model exported to {}'''.format(_lowercase ) )
UpperCamelCase = remove_dup_initializers(os.path.abspath(_lowercase ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(_lowercase ) )
UpperCamelCase = onnxruntime.InferenceSession(_lowercase )
UpperCamelCase = ort_sess.run(
_lowercase ,{
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(_lowercase ),
'''max_length''': np.array(_lowercase ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = parse_args()
UpperCamelCase = 5
UpperCamelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
UpperCamelCase = torch.device(args.device )
UpperCamelCase , UpperCamelCase = load_model_tokenizer(args.model_name_or_path ,_lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(_lowercase )
if args.max_length:
UpperCamelCase = args.max_length
if args.num_beams:
UpperCamelCase = args.num_beams
if args.output_file_path:
UpperCamelCase = args.output_file_path
else:
UpperCamelCase = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase )
if __name__ == "__main__":
main() | 34 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
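# Preprocessing builds one text prompt per candidate label from the hypothesis
# template, alongside the pixel values of the input image.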
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
SCREAMING_SNAKE_CASE_ = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = TaTokenizer
A_ = []
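# T5 reserves `extra_ids` sentinel tokens (<extra_id_0>, ...) at the end of the vocabulary.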
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<pad>" , lowerCamelCase_=1_0_0 , lowerCamelCase_=None , **lowerCamelCase_ , ) -> List[Any]:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase = [F'<extra_id_{i}>' for i in range(lowerCamelCase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCamelCase = len(set(filter(lambda lowerCamelCase_: bool('''extra_id_''' in str(lowerCamelCase_)) , lowerCamelCase_)))
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''')
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , extra_ids=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
UpperCamelCase = extra_ids
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCamelCase = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , lowerCamelCase_ , )
return max_model_length
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_):
copyfile(self.vocab_file , lowerCamelCase_)
logger.info(F'Copy vocab file to {out_vocab_file}')
return (out_vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCamelCase = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def UpperCAmelCase__ ( self) -> Optional[Any]:
return list(
set(filter(lambda lowerCamelCase_: bool(re.search(R'''<extra_id_\d+>''' , lowerCamelCase_)) is not None , self.additional_special_tokens)))
def UpperCAmelCase__ ( self) -> List[str]:
return [self.convert_tokens_to_ids(lowerCamelCase_) for token in self.get_sentinel_tokens()] | 34 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
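# Fast tests below use tiny randomly-initialized components; the slow tests load
# real Stable Diffusion 2 inpainting checkpoints and require a GPU.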
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 1 |
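# A standalone sketch of the low-memory path the final test above exercises
# (attention slicing + sequential CPU offload; the latter needs `accelerate`).
# The model id matches the test; the helper name `run_low_memory_inpaint` and
# its arguments are illustrative, not part of the test suite.
def run_low_memory_inpaint(prompt, init_image, mask_image):
    import torch
    from diffusers import StableDiffusionInpaintPipeline

    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing(1)  # compute attention one slice at a time
    pipe.enable_sequential_cpu_offload()  # move submodules to the GPU only while they run
    generator = torch.manual_seed(0)
    output = pipe(
        prompt=prompt,
        image=init_image,
        mask_image=mask_image,
        generator=generator,
        num_inference_steps=2,
        output_type="np",
    )
    return output.images[0]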
"""simple docstring"""
import argparse
import os
import jax as jnp  # quirk inherited from the upstream script: the *jax* package itself is aliased as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
SCREAMING_SNAKE_CASE_ = MODEL = 'base_with_context'  # MODEL is referenced by the argparse default below
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) ,requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCamelCase = weights[f'layers_{lyr_num}']
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase = ly_weight['''attention''']
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) ,requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCamelCase = weights[f'layers_{lyr_num}']
UpperCamelCase = ly_weight['''attention''']
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) ,requires_grad=_lowercase )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
UpperCamelCase = weights[f'layers_{lyr_num}']
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
UpperCamelCase = ly_weight['''self_attention''']
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase = ly_weight['''MultiHeadDotProductAttention_0''']
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path )
UpperCamelCase = jnp.tree_util.tree_map(onp.array ,_lowercase )
UpperCamelCase = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
UpperCamelCase = os.path.join(args.checkpoint_path ,'''..''' ,'''config.gin''' )
UpperCamelCase = inference.parse_training_gin_file(_lowercase ,_lowercase )
UpperCamelCase = inference.InferenceModel(args.checkpoint_path ,_lowercase )
UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ,variance_type='''fixed_large''' )
UpperCamelCase = SpectrogramNotesEncoder(
    max_length=synth_model.sequence_length['''inputs'''] ,
    vocab_size=synth_model.model.module.config.vocab_size ,
    d_model=synth_model.model.module.config.emb_dim ,
    dropout_rate=synth_model.model.module.config.dropout_rate ,
    num_layers=synth_model.model.module.config.num_encoder_layers ,
    num_heads=synth_model.model.module.config.num_heads ,
    d_kv=synth_model.model.module.config.head_dim ,
    d_ff=synth_model.model.module.config.mlp_dim ,
    feed_forward_proj='''gated-gelu''' ,
)
UpperCamelCase = SpectrogramContEncoder(
    input_dims=synth_model.audio_codec.n_dims ,
    targets_context_length=synth_model.sequence_length['''targets_context'''] ,
    d_model=synth_model.model.module.config.emb_dim ,
    dropout_rate=synth_model.model.module.config.dropout_rate ,
    num_layers=synth_model.model.module.config.num_encoder_layers ,
    num_heads=synth_model.model.module.config.num_heads ,
    d_kv=synth_model.model.module.config.head_dim ,
    d_ff=synth_model.model.module.config.mlp_dim ,
    feed_forward_proj='''gated-gelu''' ,
)
UpperCamelCase = TaFilmDecoder(
    input_dims=synth_model.audio_codec.n_dims ,
    targets_length=synth_model.sequence_length['''targets_context'''] ,
    max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time ,
    d_model=synth_model.model.module.config.emb_dim ,
    num_layers=synth_model.model.module.config.num_decoder_layers ,
    num_heads=synth_model.model.module.config.num_heads ,
    d_kv=synth_model.model.module.config.head_dim ,
    d_ff=synth_model.model.module.config.mlp_dim ,
    dropout_rate=synth_model.model.module.config.dropout_rate ,
)
UpperCamelCase = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] ,_lowercase )
UpperCamelCase = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] ,_lowercase )
UpperCamelCase = load_decoder(ta_checkpoint['''target''']['''decoder'''] ,_lowercase )
UpperCamelCase = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
UpperCamelCase = SpectrogramDiffusionPipeline(
notes_encoder=_lowercase ,continuous_encoder=_lowercase ,decoder=_lowercase ,scheduler=_lowercase ,melgan=_lowercase ,)
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
SCREAMING_SNAKE_CASE_ = args = parser.parse_args()  # `args` is read as a global inside the conversion function above
__snake_case(args)  # the conversion entry point defined above (upstream name: main)
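# Reload sketch (not part of the conversion script): once --save has written
# the pipeline, it can be restored through the standard diffusers API. The
# helper name and the `output_path` argument are illustrative.
def load_converted_pipeline(output_path):
    from diffusers import SpectrogramDiffusionPipeline

    return SpectrogramDiffusionPipeline.from_pretrained(output_path)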
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.1'),  # kept in sync with the '>=0.12.1' message below
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
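# Usage sketch for the context manager above (upstream names `offline` and
# `OfflineSimulationMode`). CONNECTION_FAILS patches requests.Session.send,
# so any HTTP call made inside the block fails immediately:
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         try:
#             requests.get("https://huggingface.co")
#         except requests.ConnectionError:
#             pass  # expected: "Offline mode is enabled."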
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
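# Usage sketch for the two Arrow-memory guards above (upstream names
# `assert_arrow_memory_increases` / `assert_arrow_memory_doesnt_increase`):
#
#     with assert_arrow_memory_increases():
#         table = pa.table({"col": list(range(10_000))})  # allocates Arrow buffers
#
# Each guard snapshots pa.total_allocated_bytes() before yielding and asserts
# on the delta afterwards, so the allocation must stay referenced in between.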
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta | 34 | 1 |
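# Usage sketch for the subprocess runner above (upstream name
# `execute_subprocess_async`): it raises if the command exits non-zero or
# produces no output at all, and returns a _RunOutput whose stdout/stderr
# were collected line by line.
#
#     result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
#     assert "ok" in "\n".join(result.stdout)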
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = GPTaTokenizer
A_ = GPTaTokenizerFast
A_ = True
A_ = {'''add_prefix_space''': True}
A_ = False
def UpperCAmelCase__ ( self) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase = {'''unk_token''': '''<unk>'''}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(lowerCamelCase_) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(lowerCamelCase_))
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Tuple:
kwargs.update(self.special_tokens_map)
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Tuple:
kwargs.update(self.special_tokens_map)
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = '''lower newer'''
UpperCamelCase = '''lower newer'''
return input_text, output_text
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
UpperCamelCase = '''lower newer'''
UpperCamelCase = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ , add_prefix_space=lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_)
UpperCamelCase = '''lower newer'''
# Testing tokenization
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ , add_prefix_space=lowerCamelCase_)
UpperCamelCase = rust_tokenizer.tokenize(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
# Testing conversion to ids without special tokens
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_)
UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
# Testing conversion to ids with special tokens
UpperCamelCase = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_)
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_prefix_space=lowerCamelCase_)
UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
# Testing the unknown token
UpperCamelCase = tokens + [rust_tokenizer.unk_token]
UpperCamelCase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase_) , lowerCamelCase_)
def UpperCAmelCase__ ( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def UpperCAmelCase__ ( self , lowerCamelCase_=1_5) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_)
# Simple input
UpperCamelCase = '''This is a simple input'''
UpperCamelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCamelCase = ('''This is a simple input''', '''This is a pair''')
UpperCamelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''')
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''')
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''')
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''')
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , )
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''')
# Simple input
UpperCamelCase = '''This is a simple input'''
UpperCamelCase = ['''This is a simple input looooooooong''', '''This is a simple input''']
UpperCamelCase = ('''This is a simple input''', '''This is a pair''')
UpperCamelCase = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
UpperCamelCase = tokenizer.pad_token_id
UpperCamelCase = tokenizer(lowerCamelCase_ , padding='''max_length''' , max_length=3_0 , return_tensors='''np''')
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , truncate=lowerCamelCase_ , return_tensors='''np''')
UpperCamelCase = tokenizer(*lowerCamelCase_ , padding='''max_length''' , max_length=6_0 , return_tensors='''np''')
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , truncate=lowerCamelCase_ , return_tensors='''np''')
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0)
self.assertTrue(pad_token_id in out_s['''input_ids'''])
self.assertTrue(0 in out_s['''attention_mask'''])
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0])
self.assertFalse(0 in out_sa['''attention_mask'''][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1])
self.assertTrue(0 in out_sa['''attention_mask'''][1])
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0)
self.assertTrue(pad_token_id in out_p['''input_ids'''])
self.assertTrue(0 in out_p['''attention_mask'''])
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0])
self.assertFalse(0 in out_pa['''attention_mask'''][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1])
self.assertTrue(0 in out_pa['''attention_mask'''][1])
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''$$$'''
UpperCamelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCamelCase_ , add_bos_token=lowerCamelCase_)
UpperCamelCase = '''This is a simple input'''
UpperCamelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCamelCase = tokenizer.bos_token_id
UpperCamelCase = tokenizer(lowerCamelCase_)
UpperCamelCase = tokenizer(lowerCamelCase_)
self.assertEqual(out_s.input_ids[0] , lowerCamelCase_)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
UpperCamelCase = tokenizer.decode(out_s.input_ids)
UpperCamelCase = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , lowerCamelCase_)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
def UpperCAmelCase__ ( self) -> Dict:
pass
def UpperCAmelCase__ ( self) -> int:
# TODO: change to self.get_tokenizers() when the fast version is implemented
UpperCamelCase = [self.get_tokenizer(do_lower_case=lowerCamelCase_ , add_bos_token=lowerCamelCase_)]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
UpperCamelCase = '''Encode this.'''
UpperCamelCase = '''This one too please.'''
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
encoded_sequence += tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.encode_plus(
lowerCamelCase_ , lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , )
UpperCamelCase = encoded_sequence_dict['''input_ids''']
UpperCamelCase = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(lowerCamelCase_) , len(lowerCamelCase_))
UpperCamelCase = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCamelCase_)
]
UpperCamelCase = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
@require_tokenizers
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[int]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCamelCase_)
UpperCamelCase = '''A photo of a cat'''
UpperCamelCase = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
tokenizer.save_pretrained('''test_opt''')
UpperCamelCase = AutoTokenizer.from_pretrained('''./test_opt''')
UpperCamelCase = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=lowerCamelCase_)
UpperCamelCase = '''A photo of a cat'''
UpperCamelCase = tokenizer.encode(
lowerCamelCase_ , )
# Same as above
self.assertEqual(lowerCamelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''')
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCamelCase_)
UpperCamelCase = '''bos'''
UpperCamelCase = tokenizer.get_vocab()['''bos''']
UpperCamelCase = '''A photo of a cat'''
UpperCamelCase = tokenizer.encode(
lowerCamelCase_ , )
# We changed the bos token
self.assertEqual(lowerCamelCase_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
tokenizer.save_pretrained('''./tok''')
UpperCamelCase = AutoTokenizer.from_pretrained('''./tok''')
self.assertTrue(tokenizer.is_fast)
UpperCamelCase = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8]) | 34 |
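# Illustration (not from the test file): the tests above hinge on GPT-2's
# byte-level convention of encoding a leading space as "\u0120" (rendered
# "Ġ"), which is why the toy vocab contains pieces like "\u0120low". A quick
# check against the real pretrained tokenizer (downloads the vocab files):
def _demo_gpt2_prefix_space():
    from transformers import GPT2Tokenizer

    tok = GPT2Tokenizer.from_pretrained("gpt2")
    pieces = tok.tokenize("lower newer", add_prefix_space=True)
    assert pieces[0].startswith("\u0120")  # e.g. "Ġlower"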
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(item ,sublist[-1] ):
sublist.append(item )
arr.pop(i )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(item ,xx ):
solution.insert(i ,item )
break
else:
solution.append(item )
strand_sort(arr ,reverse ,solution )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 1 |
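# A readable, self-contained equivalent of the routine above, for reference
# (the mangled `_lowercase` parameters make the original hard to follow).
# Same idea: repeatedly peel an ordered "strand" off the input and merge it
# into the solution. Note that it consumes the list it is given.
def strand_sort_reference(arr, reverse=False):
    import operator as op

    cmp = op.lt if reverse else op.gt
    solution = []
    while arr:
        sublist = [arr.pop(0)]
        remaining = []
        for item in arr:
            if cmp(item, sublist[-1]):
                sublist.append(item)  # item extends the current strand
            else:
                remaining.append(item)
        arr = remaining
        merged, i, j = [], 0, 0
        while i < len(solution) and j < len(sublist):
            if cmp(sublist[j], solution[i]):
                merged.append(solution[i])
                i += 1
            else:
                merged.append(sublist[j])
                j += 1
        solution = merged + solution[i:] + sublist[j:]
    return solution
# strand_sort_reference([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]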
"""simple docstring"""
def __snake_case ( _lowercase = 100_0000 ):
"""simple docstring"""
UpperCamelCase = set(range(3 ,_lowercase ,2 ) )
primes.add(2 )
for p in range(3 ,_lowercase ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,_lowercase ,p ) ) )  # sieve out multiples of p
UpperCamelCase = [float(n ) for n in range(_lowercase + 1 )]
for p in primes:
for n in range(p ,_lowercase + 1 ,p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }') | 34 |
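# Cross-check sketch: sum(phi(n) for 2 <= n <= limit) also counts the reduced
# proper fractions with denominator <= limit (Project Euler 72). The helper
# below recomputes it brute force from gcd, O(limit^2), for small limits only.
def _totient_sum_bruteforce(limit):
    from math import gcd

    return sum(
        sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, limit + 1)
    )
# e.g. _totient_sum_bruteforce(8) == 21, which the sieve above reproduces.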
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 1 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
if not isinstance(_lowercase ,_lowercase ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 |
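# Illustration: since the divisor-sum function defined above returns the sum
# of *proper* divisors, perfect numbers (6, 28, 496, ...) are exactly its
# fixed points.
def _check_perfect_numbers():
    return all(__snake_case(n) == n for n in (6, 28, 496))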
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 1 |
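# Illustrative expectation (not asserted upstream): on the success args above,
# _convert_nargs_to_dict should parse the flag values into typed Python
# objects rather than strings, roughly:
def _expected_success_conversion():
    return {
        "model_name_or_path": "bert",
        "do_train": False,
        "epochs": 3,
        "learning_rate": 5e-5,
        "max_steps": 50.5,
    }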
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''biogpt'''
def __init__( self , lowerCamelCase_=4_2_3_8_4 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=2_4 , lowerCamelCase_=1_6 , lowerCamelCase_=4_0_9_6 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=2 , **lowerCamelCase_ , ) -> Union[str, Any]:
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = scale_embedding
UpperCamelCase = use_cache
UpperCamelCase = layerdrop
UpperCamelCase = activation_dropout
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_) | 34 |
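# Usage sketch (standard transformers API, not part of this config file):
# build a small config with overrides; `model_type` is fixed to "biogpt".
def _demo_biogpt_config():
    from transformers import BioGptConfig

    config = BioGptConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
    assert config.model_type == "biogpt"
    return config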
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 | 1 |
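# The formula in the description, made concrete: CER = (S + D + I) / N is just
# the character-level Levenshtein distance normalized by the reference length.
# Minimal sketch (no jiwer transforms), for intuition only:
def _char_error_rate(reference, prediction):
    # one row of the edit-distance DP at a time
    prev = list(range(len(prediction) + 1))
    for i, r in enumerate(reference, start=1):
        cur = [i]
        for j, p in enumerate(prediction, start=1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (r != p)))
        prev = cur
    return prev[-1] / len(reference)
# e.g. _char_error_rate("abc", "axc") == 1/3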
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=7 , lowerCamelCase_=3 , lowerCamelCase_=1_8 , lowerCamelCase_=3_0 , lowerCamelCase_=4_0_0 , lowerCamelCase_=True , lowerCamelCase_=3_2 , lowerCamelCase_=True , ) -> Union[str, Any]:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size_divisor
UpperCamelCase = do_rescale
def UpperCAmelCase__ ( self) -> Any:
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = GLPNImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = GLPNImageProcessingTester(self)
@property
def UpperCAmelCase__ ( self) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCamelCase_ , '''do_resize'''))
self.assertTrue(hasattr(lowerCamelCase_ , '''size_divisor'''))
self.assertTrue(hasattr(lowerCamelCase_ , '''resample'''))
self.assertTrue(hasattr(lowerCamelCase_ , '''do_rescale'''))
def UpperCAmelCase__ ( self) -> str:
pass
def UpperCAmelCase__ ( self) -> Optional[Any]:
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image)
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def UpperCAmelCase__ ( self) -> Optional[Any]:
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray)
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def UpperCAmelCase__ ( self) -> Optional[int]:
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor)
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) | 34 |
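# The shape assertions above check divisibility by `size_divisor`; GLPN's
# resize rule rounds each dimension *down* to the nearest multiple of it.
# One-line sketch of that rule (helper name is illustrative):
def _round_down_to_multiple(height, width, size_divisor=32):
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor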
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
        return (out_vocab_file,)
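# Illustrative sketch (not part of the tokenizer above): XLNet-style inputs put
# the separator and classifier tokens at the END of the sequence, unlike BERT's
# leading [CLS]. The helper name `build_pair` and the ids sep=4 / cls=3 are
# assumptions for this demo only; the logic mirrors the two methods above.
def build_pair(ids_a, ids_b=None, sep_id=4, cls_id=3):
    sep, cls = [sep_id], [cls_id]
    if ids_b is None:
        # single sequence: tokens + <sep> + <cls>, segment id 2 for <cls>
        return ids_a + sep + cls, [0] * (len(ids_a) + 1) + [2]
    tokens = ids_a + sep + ids_b + sep + cls
    types = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 1) + [2]
    return tokens, types
assert build_pair([10, 11]) == ([10, 11, 4, 3], [0, 0, 0, 2])
assert build_pair([10], [20, 21]) == ([10, 4, 20, 21, 4, 3], [0, 0, 1, 1, 1, 2])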
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = '▁'
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
SCREAMING_SNAKE_CASE_ = {
'google/pegasus-xsum': 512,
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PegasusTokenizer
A_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="<pad>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<mask_2>" , lowerCamelCase_="<mask_1>" , lowerCamelCase_=None , lowerCamelCase_=1_0_3 , **lowerCamelCase_ , ) -> Optional[int]:
UpperCamelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
raise TypeError(
F'additional_special_tokens should be of type {type(lowerCamelCase_)}, but is'
F' {type(lowerCamelCase_)}')
UpperCamelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(lowerCamelCase_) , self.offset - 1)
]
if len(set(lowerCamelCase_)) != len(lowerCamelCase_):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
UpperCamelCase = additional_special_tokens_extended
else:
UpperCamelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset)]
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , pad_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , mask_token_sent=lowerCamelCase_ , offset=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[Any]:
UpperCamelCase = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
F' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}')
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase_)
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase_) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_):
copyfile(self.vocab_file , lowerCamelCase_)
        return (out_vocab_file,)
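# Illustrative sketch: Pegasus reserves the first `offset` ids for special
# tokens and pads any unused slots with "<unk_2>" ... "<unk_{offset-1}>"
# placeholders so the vocabulary stays aligned. A stand-alone version of the
# default filler above (the function name is mine, not the class API):
def default_extra_tokens(mask_token_sent="<mask_1>", offset=103):
    extra = [mask_token_sent] if mask_token_sent is not None else []
    extra += [f"<unk_{i}>" for i in range(2, offset)]
    return extra
toks = default_extra_tokens()
assert len(toks) == 102 and toks[0] == "<mask_1>" and toks[-1] == "<unk_102>"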
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
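# Illustrative sketch of the greedy longest-match loop above: starting at
# `start`, shrink the window from the right until the substring is in the
# vocabulary, emit it, and continue after it; an unmatched character becomes
# the unknown token. Stand-alone copy with a toy vocabulary:
def greedy_wordpiece(token, vocab, unk="<unk>"):
    chars, start, pieces = list(token), 0, []
    while start < len(chars):
        end, match = len(chars), None
        while start < end:
            if "".join(chars[start:end]) in vocab:
                match = "".join(chars[start:end])
                break
            end -= 1
        if match is None:
            pieces.append(unk)
            start += 1
        else:
            pieces.append(match)
            start = end
    return pieces
assert greedy_wordpiece("unhappy", {"un", "happy"}) == ["un", "happy"]
assert greedy_wordpiece("unzz", {"un"}) == ["un", "<unk>", "<unk>"]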
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
        return [1] + ([0] * len(lowerCamelCase_))
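# Illustrative layout of the CPM-Ant input builder above: every sequence is
# prefixed with the BOS id, and a pair is simply two such blocks concatenated.
# The id 6 below is a made-up stand-in, not the real vocabulary id.
def build_inputs(ids_a, ids_b=None, bos=6):
    if ids_b is None:
        return [bos] + ids_a
    return [bos] + ids_a + [bos] + ids_b
assert build_inputs([1, 2], [3]) == [6, 1, 2, 6, 3]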
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
        return self._base_distribution((total_count, logits))
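# Quick check of the `squareplus` map used above: f(x) = (x + sqrt(x^2 + 4)) / 2
# is smooth and strictly positive (a softplus-like alternative), which is why it
# is used to turn unconstrained network outputs into scale/df/total_count
# parameters. Stand-alone verification:
import math
def squareplus(x: float) -> float:
    return (x + math.sqrt(x * x + 4.0)) / 2.0
assert squareplus(0.0) == 1.0  # f(0) = 1
assert squareplus(-100.0) > 0.0  # still positive for very negative inputs
assert abs(squareplus(100.0) - 100.0) < 0.02  # f(x) ~ x for large positive x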
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 1 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
SCREAMING_SNAKE_CASE_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> Optional[int]:
super().__init__()
        UpperCamelCase = torchvision.models.resnet152(pretrained=lowerCamelCase_)
UpperCamelCase = list(model.children())[:-2]
UpperCamelCase = nn.Sequential(*lowerCamelCase_)
        UpperCamelCase = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
UpperCamelCase = self.pool(self.model(lowerCamelCase_))
UpperCamelCase = torch.flatten(lowerCamelCase_ , start_dim=2)
UpperCamelCase = out.transpose(1 , 2).contiguous()
return out # BxNx2048
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase = [json.loads(lowerCamelCase_) for l in open(lowerCamelCase_)]
UpperCamelCase = os.path.dirname(lowerCamelCase_)
UpperCamelCase = tokenizer
UpperCamelCase = labels
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = max_seq_length
UpperCamelCase = transforms
def __len__( self) -> int:
return len(self.data)
def __getitem__( self , lowerCamelCase_) -> int:
UpperCamelCase = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=lowerCamelCase_))
UpperCamelCase , UpperCamelCase , UpperCamelCase = sentence[0], sentence[1:-1], sentence[-1]
UpperCamelCase = sentence[: self.max_seq_length]
UpperCamelCase = torch.zeros(self.n_classes)
UpperCamelCase = 1
UpperCamelCase = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''])).convert('''RGB''')
UpperCamelCase = self.transforms(lowerCamelCase_)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = Counter()
for row in self.data:
label_freqs.update(row['''label'''])
return label_freqs
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [len(row['''sentence'''] ) for row in batch]
UpperCamelCase , UpperCamelCase = len(_lowercase ), max(_lowercase )
UpperCamelCase = torch.zeros(_lowercase ,_lowercase ,dtype=torch.long )
UpperCamelCase = torch.zeros(_lowercase ,_lowercase ,dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_lowercase ,_lowercase ) ):
UpperCamelCase = input_row['''sentence''']
UpperCamelCase = 1
UpperCamelCase = torch.stack([row['''image'''] for row in batch] )
UpperCamelCase = torch.stack([row['''label'''] for row in batch] )
UpperCamelCase = torch.stack([row['''image_start_token'''] for row in batch] )
UpperCamelCase = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def __snake_case ( ):
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __snake_case ( ):
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] ,std=[0.12221994, 0.12145835, 0.14380469] ,),
        ] )
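# Illustrative sketch of the collate step above: variable-length sentences are
# right-padded into one LongTensor and a matching mask marks the real tokens.
# Stand-alone version (the helper name is mine; torch is imported above):
import torch
def pad_batch(seqs):
    bsz, max_len = len(seqs), max(len(s) for s in seqs)
    text = torch.zeros(bsz, max_len, dtype=torch.long)
    mask = torch.zeros(bsz, max_len, dtype=torch.long)
    for i, s in enumerate(seqs):
        text[i, : len(s)] = torch.tensor(s)
        mask[i, : len(s)] = 1
    return text, mask
text, mask = pad_batch([[5, 6, 7], [8]])
assert mask.tolist() == [[1, 1, 1], [1, 0, 0]]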
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
        pytest_terminal_summary_main(_lowercase ,id=_lowercase )
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
SCREAMING_SNAKE_CASE_ = int(input('Enter number: ').strip())
    print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
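# Sanity check of the divisor-sum definition above: a perfect number equals the
# sum of its proper divisors (6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14).
def is_perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
assert [n for n in range(2, 500) if is_perfect(n)] == [6, 28, 496]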
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
        super().__init__(*lowerCamelCase_ , **lowerCamelCase_)
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = PhobertTokenizer
A_ = False
def UpperCAmelCase__ ( self) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = ['''#version: 0.2''', '''l à</w>''']
UpperCamelCase = {'''unk_token''': '''<unk>'''}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(lowerCamelCase_))
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
kwargs.update(self.special_tokens_map)
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = '''Tôi là VinAI Research'''
UpperCamelCase = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
UpperCamelCase = '''Tôi là VinAI Research'''
UpperCamelCase = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_)
print(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , lowerCamelCase_)
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
        # if the new index's result extends the right interval further,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # that means this index is the starting position of a substring
        # which is equal to the pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
    doctest.testmod()
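# Worked example for the Z-function above (stand-alone naive copy so it can run
# on its own): z[i] is the length of the longest common prefix of s and s[i:],
# with z[0] left at 0 by this implementation's convention.
def z_naive(s):
    z = [0] * len(s)
    for i in range(1, len(s)):
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
    return z
assert z_naive("abacaba") == [0, 0, 1, 0, 3, 0, 1]
# Pattern counting runs on pattern + text: every z-value >= len(pattern) marks
# one occurrence ("aba" occurs twice in "abacaba").
assert sum(v >= 3 for v in z_naive("aba" + "abacaba")) == 2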
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
        UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Params4bit )
        UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.Int8Params )
    if is_8bit or is_4bit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
                if value.dtype == torch.int8:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
                    if not is_8bit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls ,_lowercase ) and fp16_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
            if is_8bit:
                UpperCamelCase = bnb.nn.Int8Params(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
            elif is_4bit:
                UpperCamelCase = bnb.nn.Params4bit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
            if fp16_statistics is not None:
                setattr(module.weight ,'''SCB''' ,fp16_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        UpperCamelCase = bnb.nn.Linear8bitLt(
                            _lowercase ,_lowercase ,module.bias is not None ,has_fp16_weights=quantization_config.llm_int8_has_fp16_weight ,threshold=quantization_config.llm_int8_threshold ,)
UpperCamelCase = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            UpperCamelCase = bnb.nn.Linear4bit(
                                _lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_4bit_compute_dtype ,compress_statistics=quantization_config.bnb_4bit_use_double_quant ,quant_type=quantization_config.bnb_4bit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
    UpperCamelCase = deepcopy(_lowercase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
    return filtered_module_names
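# Illustrative sketch of the dotted-name resolution used at the top of the
# tensor setter above: "encoder.layer.0.weight" is split on '.', every segment
# but the last is resolved with getattr, and the final segment names the
# parameter or buffer on the resolved module. Stand-alone version:
import torch.nn as nn
def resolve_parent(root, dotted_name):
    *parents, leaf = dotted_name.split(".")
    module = root
    for part in parents:
        module = getattr(module, part)  # raises AttributeError if the path is wrong
    return module, leaf
model = nn.Sequential(nn.Linear(2, 2))
parent, leaf = resolve_parent(model, "0.weight")
assert parent is model[0] and leaf == "weight"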
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''cvt'''
def __init__( self , lowerCamelCase_=3 , lowerCamelCase_=[7, 3, 3] , lowerCamelCase_=[4, 2, 2] , lowerCamelCase_=[2, 1, 1] , lowerCamelCase_=[6_4, 1_9_2, 3_8_4] , lowerCamelCase_=[1, 3, 6] , lowerCamelCase_=[1, 2, 1_0] , lowerCamelCase_=[4.0, 4.0, 4.0] , lowerCamelCase_=[0.0, 0.0, 0.0] , lowerCamelCase_=[0.0, 0.0, 0.0] , lowerCamelCase_=[0.0, 0.0, 0.1] , lowerCamelCase_=[True, True, True] , lowerCamelCase_=[False, False, True] , lowerCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , lowerCamelCase_=[3, 3, 3] , lowerCamelCase_=[1, 1, 1] , lowerCamelCase_=[2, 2, 2] , lowerCamelCase_=[1, 1, 1] , lowerCamelCase_=[1, 1, 1] , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(**lowerCamelCase_)
UpperCamelCase = num_channels
UpperCamelCase = patch_sizes
UpperCamelCase = patch_stride
UpperCamelCase = patch_padding
UpperCamelCase = embed_dim
UpperCamelCase = num_heads
UpperCamelCase = depth
UpperCamelCase = mlp_ratio
UpperCamelCase = attention_drop_rate
UpperCamelCase = drop_rate
UpperCamelCase = drop_path_rate
UpperCamelCase = qkv_bias
UpperCamelCase = cls_token
UpperCamelCase = qkv_projection_method
UpperCamelCase = kernel_qkv
UpperCamelCase = padding_kv
UpperCamelCase = stride_kv
UpperCamelCase = padding_q
UpperCamelCase = stride_q
UpperCamelCase = initializer_range
        UpperCamelCase = layer_norm_eps
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is:'
)
print(z)
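# Stand-alone sketch of the random-pivot quicksort above (the file's own
# helpers are shadowed by the naming scheme, so this copy uses fresh names):
# swap a random element to the end, Lomuto-partition around it, and count the
# element comparisons performed.
from random import randint
def quick_sort_count(a, start, end):
    if start >= end:
        return 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]
    boundary, count = start - 1, 0
    for i in range(start, end):
        count += 1
        if a[i] < a[end]:
            boundary += 1
            a[boundary], a[i] = a[i], a[boundary]
    a[boundary + 1], a[end] = a[end], a[boundary + 1]
    p = boundary + 1
    return count + quick_sort_count(a, start, p - 1) + quick_sort_count(a, p + 1, end)
data = [3, 1, 4, 1, 5, 9, 2, 6]
comparisons = quick_sort_count(data, 0, len(data) - 1)
assert data == [1, 1, 2, 3, 4, 5, 6, 9] and comparisons >= len(data) - 1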
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''swinv2'''
A_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowerCamelCase_=2_2_4 , lowerCamelCase_=4 , lowerCamelCase_=3 , lowerCamelCase_=9_6 , lowerCamelCase_=[2, 2, 6, 2] , lowerCamelCase_=[3, 6, 1_2, 2_4] , lowerCamelCase_=7 , lowerCamelCase_=4.0 , lowerCamelCase_=True , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_="gelu" , lowerCamelCase_=False , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_=3_2 , **lowerCamelCase_ , ) -> Any:
super().__init__(**lowerCamelCase_)
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_) - 1))
        UpperCamelCase = (0, 0, 0, 0)
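# Quick check of the hidden-size rule above: the embedding dimension doubles at
# each of the len(depths) - 1 downsampling stages, so the defaults
# (embed_dim=96, depths=[2, 2, 6, 2]) give 96 * 2 ** 3 = 768 channels after the
# last stage.
assert int(96 * 2 ** (len([2, 2, 6, 2]) - 1)) == 768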
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_)
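# Quick demonstration of the templates defined at the top of this test file:
# filling {0} with the object name reproduces the auto-generated placeholder
# that the assertions above compare against.
null_template = "\n{0} = None\n"
assert null_template.format("CONSTANT") == "\nCONSTANT = None\n"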
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = state_dict.pop(_lowercase )
UpperCamelCase = val
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase = key.replace('''backbone.0.body''' ,'''backbone.conv_encoder.model''' )
UpperCamelCase = value
else:
UpperCamelCase = value
return new_state_dict
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
UpperCamelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase = in_proj_weight_cross_attn[:256, :]
UpperCamelCase = in_proj_bias_cross_attn[:256]
UpperCamelCase = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase = in_proj_bias_cross_attn[256:512]
UpperCamelCase = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase = in_proj_bias_cross_attn[-256:]
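# Illustration only (not part of the conversion): a toy version of the fused
# query/key/value split above, with a hidden size of 4 standing in for the model's 256.
import torch
_demo_fused = torch.arange(12 * 4, dtype=torch.float32).reshape(12, 4)  # (3 * hidden, hidden)
_demo_q, _demo_k, _demo_v = _demo_fused[:4, :], _demo_fused[4:8, :], _demo_fused[-4:, :]
assert torch.equal(torch.cat([_demo_q, _demo_k, _demo_v]), _demo_fused)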
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = image.size
UpperCamelCase = max(_lowercase ,_lowercase )
UpperCamelCase = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase = target_max_size / current_max_size
UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = F.to_tensor(_lowercase )
UpperCamelCase = F.normalize(_lowercase ,mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(_lowercase ,_lowercase ,_lowercase )
UpperCamelCase = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase = state_dict.pop(_lowercase )
UpperCamelCase = val
# create HuggingFace model and load state dict
UpperCamelCase = TableTransformerConfig(
backbone='''resnet18''' ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,ce_loss_coefficient=1 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.4 ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,)
if "detection" in checkpoint_url:
UpperCamelCase = 15
UpperCamelCase = 2
UpperCamelCase = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase = 125
UpperCamelCase = 6
UpperCamelCase = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = DetrImageProcessor(
format='''coco_detection''' ,max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase = TableTransformerForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# verify our conversion
UpperCamelCase = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase = hf_hub_download(repo_id='''nielsr/example-pdf''' ,repo_type='''dataset''' ,filename=_lowercase )
UpperCamelCase = Image.open(_lowercase ).convert('''RGB''' )
UpperCamelCase = normalize(resize(_lowercase ,_lowercase ) ).unsqueeze(0 )
UpperCamelCase = model(_lowercase )
if "detection" in checkpoint_url:
UpperCamelCase = (1, 15, 3)
UpperCamelCase = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
UpperCamelCase = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
UpperCamelCase = (1, 125, 7)
UpperCamelCase = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
UpperCamelCase = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,_lowercase ,atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(_lowercase )
image_processor.push_to_hub(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 34 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
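    # the default ViTMAEConfig corresponds to the base-sized model; "large" and
    # "huge" checkpoints override the relevant size parameters below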
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' ,type=int ,default=1 ,help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
        '''training_script''' ,type=str ,help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) ,)
# rest from the training program
    parser.add_argument('''training_script_args''' ,nargs=REMAINDER )
return parser.parse_args()
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = parse_args()
# Import training_script as a module.
UpperCamelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCamelCase = script_fpath.stem
UpperCamelCase = importlib.import_module(_lowercase )
# Patch sys.argv
UpperCamelCase = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
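# Illustration only (hypothetical, not part of this launcher): the launched training
# script must expose an `_mp_fn` entry point; xmp.spawn calls it with the ordinal of
# each spawned TPU process, roughly like:
def _example_mp_fn(index):
    print(f"running on TPU core {index}")  # index in range(args.num_cores)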
if __name__ == "__main__":
main() | 34 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
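# Illustration only: a minimal sketch of the halving-retry behaviour of
# `find_executable_batch_size` that the tests below rely on (the real decorator
# also frees accelerator memory between attempts).
def _sketch_find_executable_batch_size(starting_batch_size=128):
    def decorator(function):
        def inner(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as error:
                    if "out of memory" in str(error):
                        batch_size //= 2  # halve the batch size and retry
                    else:
                        raise
            raise RuntimeError("No executable batch size found, reached zero.")
        return inner
    return decorator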
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
import string
def __snake_case ( _lowercase ):
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
UpperCamelCase = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCamelCase = string.ascii_uppercase.find(_lowercase )
UpperCamelCase = num - key
if num < 0:
UpperCamelCase = num + len(string.ascii_uppercase )
UpperCamelCase = translated + string.ascii_uppercase[num]
else:
UpperCamelCase = translated + symbol
print(f'Decryption using Key #{key}: {translated}' )
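# Example: for the ciphertext "WKLV", the candidate printed for Key #3 is
# "Decryption using Key #3: THIS".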
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = input('''Encrypted message: ''' )
UpperCamelCase = message.upper()
decrypt(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 34 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
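        # second pass: repeat evaluate() and predict() with eval_accumulation_steps = 2
        # (reset back to None at the end)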
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 1 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
SCREAMING_SNAKE_CASE_ = pytest.mark.integration
SCREAMING_SNAKE_CASE_ = {'comet'}
SCREAMING_SNAKE_CASE_ = importlib.util.find_spec('fairseq') is not None
SCREAMING_SNAKE_CASE_ = {'code_eval'}
SCREAMING_SNAKE_CASE_ = os.name == 'nt'
SCREAMING_SNAKE_CASE_ = {'bertscore', 'frugalscore', 'perplexity'}
SCREAMING_SNAKE_CASE_ = importlib.util.find_spec('transformers') is not None
def __snake_case ( _lowercase ):
"""simple docstring"""
@wraps(_lowercase )
def wrapper(self ,_lowercase ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('''"test requires Fairseq"''' )
else:
test_case(self ,_lowercase )
return wrapper
def __snake_case ( _lowercase ):
"""simple docstring"""
@wraps(_lowercase )
def wrapper(self ,_lowercase ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('''"test requires transformers"''' )
else:
test_case(self ,_lowercase )
return wrapper
def __snake_case ( _lowercase ):
"""simple docstring"""
@wraps(_lowercase )
def wrapper(self ,_lowercase ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('''"test not supported on Windows"''' )
else:
test_case(self ,_lowercase )
return wrapper
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@local
class snake_case_ ( parameterized.TestCase ):
"""simple docstring"""
A_ = {}
A_ = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''')
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''')
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
UpperCamelCase = '''[...]'''
UpperCamelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , lowerCamelCase_)).module_path)
UpperCamelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCamelCase_)
# check parameters
UpperCamelCase = inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(lowerCamelCase_ , metric_module.__name__):
with self.use_local_metrics():
try:
UpperCamelCase = doctest.testmod(lowerCamelCase_ , verbose=lowerCamelCase_ , raise_on_error=lowerCamelCase_)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@slow
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = '''[...]'''
UpperCamelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , lowerCamelCase_)).module_path)
# run doctest
with self.use_local_metrics():
UpperCamelCase = doctest.testmod(lowerCamelCase_ , verbose=lowerCamelCase_ , raise_on_error=lowerCamelCase_)
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@contextmanager
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCamelCase_):
yield
else:
yield
@contextmanager
def UpperCAmelCase__ ( self) -> Dict:
def load_local_metric(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_):
return load_metric(os.path.join('''metrics''' , lowerCamelCase_) , *lowerCamelCase_ , **lowerCamelCase_)
with patch('''datasets.load_metric''') as mock_load_metric:
UpperCamelCase = load_local_metric
yield
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_) -> Tuple:
def wrapper(lowerCamelCase_):
UpperCamelCase = contextmanager(lowerCamelCase_)
UpperCamelCase = patcher
return patcher
return wrapper
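# Each patcher below is registered through the classmethod above; it replaces the
# metric's expensive model download / forward pass with a cheap mock.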
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def __snake_case ( _lowercase ):
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''' ,'''''' ,'''''' ) # handle pytest cli flags
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
assert len(input_dict['''input_ids''']) == 2
return np.array([1.03, 1.04])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
UpperCamelCase = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def __snake_case ( _lowercase ):
"""simple docstring"""
import torch
def bert_cos_score_idf(_lowercase ,_lowercase ,*_lowercase ,**_lowercase ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_lowercase ) )
    # mock get_model, which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
UpperCamelCase = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def __snake_case ( _lowercase ):
"""simple docstring"""
def load_from_checkpoint(_lowercase ):
class snake_case_ :
"""simple docstring"""
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
assert len(lowerCamelCase_) == 2
UpperCamelCase = [0.19, 0.92]
return scores, sum(lowerCamelCase_) / len(lowerCamelCase_)
return Model()
    # mock load_from_checkpoint, which is supposed to download a comet checkpoint
with patch('''comet.download_model''' ) as mock_download_model:
UpperCamelCase = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
UpperCamelCase = load_from_checkpoint
yield
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = load_metric(os.path.join('''metrics''' ,'''seqeval''' ) )
UpperCamelCase = '''ERROR'''
UpperCamelCase = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
with pytest.raises(_lowercase ,match=re.escape(_lowercase ) ):
metric.compute(predictions=[] ,references=[] ,scheme=_lowercase ) | 34 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
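# Illustration only: the rename is an ordered chain of plain substring replacements;
# e.g. with the first three INIT_COMMON patterns:
_demo_key = "encoder/layer_0/kernel"
for _tf, _hf in [("/", "."), ("layer_", "layers."), ("kernel", "weight")]:
    _demo_key = _demo_key.replace(_tf, _hf)
assert _demo_key == "encoder.layers.0.weight"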
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
"""simple docstring"""
import pprint
import requests
SCREAMING_SNAKE_CASE_ = 'https://zenquotes.io/api'
def __snake_case ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __snake_case ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = random_quotes()
pprint.pprint(response) | 34 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
    # total number of single-character occurrences (denominator for the probabilities)
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
UpperCamelCase = cha + cha
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
UpperCamelCase = cha + cha
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_ = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure) | 34 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
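    # Hypothetical usage sketch (model name assumed, illustration only):
    #   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    #   classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")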
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = [0] * no_of_processes
UpperCamelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowercase ):
UpperCamelCase = burst_time[i]
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = 0
    # While not all processes are completed:
    # every process whose arrival time has passed and which still has remaining
    # execution time is put into ready_process, and the process with the
    # shortest remaining time (target_process) is executed next.
while completed != no_of_processes:
UpperCamelCase = []
UpperCamelCase = -1
for i in range(_lowercase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowercase )
if len(_lowercase ) > 0:
UpperCamelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
UpperCamelCase = i
total_time += burst_time[target_process]
completed += 1
UpperCamelCase = 0
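            # waiting time = completion time - arrival time - burst time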
UpperCamelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = [0] * no_of_processes
for i in range(_lowercase ):
UpperCamelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
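    # For this test case the waiting times come out as [0, 5, 2, 10] and the
    # turnaround times as [2, 10, 5, 17] (averages 4.25000 and 8.50000).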
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
f'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(f'\nAverage waiting time = {mean(waiting_time):.5f}')
print(f'Average turnaround time = {mean(turn_around_time):.5f}') | 34 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
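        # in_channels=9 below: 4 latent channels + 4 masked-image latent channels
        # + 1 mask channel, the input layout an inpainting UNet expects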
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
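

# Usage sketch (illustration, not part of the original module): the config is a
# plain container, so it can be built and inspected without downloading weights.
if __name__ == "__main__":
    demo_config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
    print(demo_config.backbone, demo_config.use_timm_backbone, demo_config.out_indices)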
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 for `gw0`)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Derive a unique torch.distributed port per pytest-xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
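

# Usage sketch (illustration): simulating a network outage with the `offline`
# helper above; `requests` is imported at the top of this module.
if __name__ == "__main__":
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.get("https://huggingface.co", timeout=0.5)
        except requests.ConnectionError as exc:
            print("offline simulation raised:", exc)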
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map an original MAE checkpoint key onto the Hugging Face ViTMAE naming scheme."""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
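
# Worked example (illustration): `rename_key` first maps "blocks" to
# "vit.encoder.layer" and then "attn.proj" to "attention.output.dense", so
#   rename_key("blocks.0.attn.proj.weight")
#   == "vit.encoder.layer.0.attention.output.dense.weight"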
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Sort `arr` by repeatedly peeling off increasing strands and merging them.

    >>> strand_sort([4, 3, 5, 1, 2])
    [1, 2, 3, 4, 5]
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
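
# Worked trace (illustration): strand_sort([4, 3, 5, 1, 2]) peels the increasing
# strand [4, 5] on the first pass, then [3] and [1, 2] on later recursive
# passes, merging each strand into the running solution to reach [1, 2, 3, 4, 5].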
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n          Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n          Kern, Robert and Larson, Eric and Carey, C J and\n          Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n          Harris, Charles R. and Archibald, Anne M. and\n          Ribeiro, Antonio H. and Pedregosa, Fabian and\n          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n          Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set `module.tensor_name` to `value` on `device`, quantizing with bitsandbytes when applicable."""
    # recurse into submodules for dotted tensor names
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
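

# Usage sketch (illustration; assumes a CUDA device and `bitsandbytes` installed):
# `replace_with_bnb_linear` is normally driven by `from_pretrained`, e.g.
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quant_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained(
#       "gpt2", quantization_config=quant_config, device_map="auto"
#   )
#
# which swaps every eligible nn.Linear/Conv1D for bnb.nn.Linear8bitLt while
# skipping the modules returned by `get_keys_to_not_convert` (e.g. the lm_head).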
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
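

# Illustration of what `_convert_nargs_to_dict` is expected to produce for the
# "success" arguments above (types inferred from the assertions in the test):
#   {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#    "learning_rate": 5e-05, "max_steps": 50.5}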
"""simple docstring"""
import random
def _partition(data, pivot):
    """Partition `data` into elements less than, equal to and greater than `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    """Return the `index`-th smallest element of `items` (0-indexed), or None if out of range."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
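

# Usage sketch: `quick_select(items, k)` returns the k-th smallest element
# (0-indexed), so the middle index gives a randomized-pivot median.
if __name__ == "__main__":
    print(quick_select([7, 1, 9, 4, 3], 2))  # 4 (the median)
    print(quick_select([2, 1], 5))  # None (index out of range)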
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n  year = {2004},\n  month = {01},\n  pages = {},\n  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcriptions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
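

# Worked example (illustration) of the CER formula from the docstring:
# reference "abc" vs. prediction "axc" gives S=1, D=0, I=0 over N=3 reference
# characters, so CER = (1 + 0 + 0) / 3 ≈ 0.33.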
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def UpperCAmelCase__ ( self) -> int:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
        UpperCamelCase = ''''''.join(lowerCamelCase_).replace(SPIECE_UNDERLINE , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
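    # Layout sketch (illustrative, not from the source): with sep='<sep>' and
    # cls='<cls>', a single sequence becomes  A <sep> <cls>  and a pair becomes
    # A <sep> B <sep> <cls>; XLNet appends the classification token at the end,
    # unlike BERT, which prepends it.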
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
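    # Segment-id sketch (illustrative): sequence A and its <sep> get segment 0,
    # sequence B and its <sep> get segment 1, and the trailing <cls> gets the
    # dedicated segment id 2 (cls_segment_id above).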
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''unispeech-sat'''
def __init__( self , lowerCamelCase_=3_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_="group" , lowerCamelCase_="gelu" , lowerCamelCase_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCamelCase_=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase_=(1_0, 3, 3, 3, 3, 2, 2) , lowerCamelCase_=False , lowerCamelCase_=1_2_8 , lowerCamelCase_=1_6 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=0.05 , lowerCamelCase_=1_0 , lowerCamelCase_=2 , lowerCamelCase_=0.0 , lowerCamelCase_=1_0 , lowerCamelCase_=0 , lowerCamelCase_=3_2_0 , lowerCamelCase_=2 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_0 , lowerCamelCase_=2_5_6 , lowerCamelCase_=2_5_6 , lowerCamelCase_=0.1 , lowerCamelCase_="mean" , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=2_5_6 , lowerCamelCase_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowerCamelCase_=(5, 3, 3, 1, 1) , lowerCamelCase_=(1, 2, 3, 1, 1) , lowerCamelCase_=5_1_2 , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=2 , lowerCamelCase_=5_0_4 , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_)
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_norm
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = len(self.conv_dim)
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = num_clusters
UpperCamelCase = do_stable_layer_norm
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = apply_spec_augment
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase = num_codevectors_per_group
UpperCamelCase = num_codevector_groups
UpperCamelCase = contrastive_logits_temperature
UpperCamelCase = feat_quantizer_dropout
UpperCamelCase = num_negatives
UpperCamelCase = codevector_dim
UpperCamelCase = proj_codevector_dim
UpperCamelCase = diversity_loss_weight
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = xvector_output_dim
@property
    def UpperCAmelCase__ ( self) -> int:
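        # Overall downsampling factor of the convolutional feature extractor:
        # the product of all strides, e.g. (5, 2, 2, 2, 2, 2, 2) -> 320 raw
        # samples per output frame.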
return functools.reduce(operator.mul , self.conv_stride , 1) | 34 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
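# Minimal usage sketch (hypothetical file name): assuming a plain text vocab
# with one token per line,
#   vocab = load_vocab('vocab.txt')   # OrderedDict mapping token -> line index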
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
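# Greedy longest-match-first illustration (hypothetical vocab): with the vocab
# {'unhap', 'py'}, tokenizing 'unhappy' shrinks the window from 'unhappy' until
# 'unhap' matches, then resumes at index 5 and matches 'py', giving
# ['unhap', 'py']; a position with no match at all emits the unk token and
# advances one character.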
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
        UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
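    # Layout sketch (illustrative): CPM-Ant prepends the BOS token to every
    # sequence, so a pair becomes  <s> A <s> B  rather than the BERT-style
    # [CLS] A [SEP] B [SEP].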
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = str(_lowercase )
return n == n[::-1]
def __snake_case ( _lowercase = 100_0000 ):
"""simple docstring"""
UpperCamelCase = 0
for i in range(1 ,_lowercase ):
if is_palindrome(_lowercase ) and is_palindrome(bin(_lowercase ).split('''b''' )[1] ):
total += i
return total
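# Worked check (illustrative): 585 is a palindrome in base 10 and in base 2,
# since bin(585).split('b')[1] == '1001001001', so solution() counts it.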
if __name__ == "__main__":
print(solution(int(str(input().strip())))) | 34 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
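# The staticmethod above is "squareplus", a smooth, strictly positive map:
# f(x) = (x + sqrt(x^2 + 4)) / 2, with f(0) = 1 and f(x) -> x for large x.
# It turns unconstrained network outputs into valid positive parameters
# (scale, degrees of freedom, total_count) in the subclasses below.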
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
            # See the scaling property of the Gamma-Poisson mixture: adding
            # log(scale) to the logits multiplies the NegativeBinomial mean by
            # `scale` while total_count stays fixed.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 1 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
SCREAMING_SNAKE_CASE_ = datasets.utils.logging.get_logger(__name__)
class snake_case_ ( folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
A_ = None
A_ = None
class snake_case_ ( folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
A_ = datasets.Audio()
A_ = '''audio'''
A_ = AudioFolderConfig
A_ = 42 # definition at the bottom of the script
A_ = AudioClassification(audio_column='''audio''' , label_column='''label''' )
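# Usage sketch (hypothetical path): load_dataset('audiofolder', data_dir='data/')
# infers one class label per sub-directory and decodes any file whose suffix
# appears in AUDIO_EXTENSIONS below.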
SCREAMING_SNAKE_CASE_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
SCREAMING_SNAKE_CASE_ = AUDIO_EXTENSIONS | 34 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
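# Hedged note: in a real conftest.py these two helpers would be the pytest
# hooks `pytest_addoption` and `pytest_terminal_summary`; pytest discovers them
# by name, so each simply forwards to the shared diffusers testing utilities.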
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase=None ,_lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=_lowercase )
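# Sketch of why the helper exists: dataclass fields cannot take a mutable list
# as a plain default, so the helper wraps it in a default_factory, e.g.
#   A_ = list_field(default=[8])   # instead of the illegal field(default=[8])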
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
A_ = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
A_ = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Benchmark training of model'''} )
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Verbose memory tracing'''} )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
A_ = field(
default=lowerCamelCase_ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Trace memory line by line'''} )
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Save result to a CSV file'''} )
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Save all print statements in a log file'''} )
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Whether to print environment information'''} )
A_ = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
A_ = field(
default=f"""inference_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
A_ = field(
default=f"""inference_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
A_ = field(
default=f"""train_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
A_ = field(
default=f"""train_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
A_ = field(
default=f"""env_info_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
A_ = field(
default=f"""log_{round(time() )}.csv""" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
A_ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
A_ = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def UpperCAmelCase__ ( self) -> Tuple:
warnings.warn(
F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , lowerCamelCase_ , )
def UpperCAmelCase__ ( self) -> Union[str, Any]:
return json.dumps(dataclasses.asdict(self) , indent=2)
@property
def UpperCAmelCase__ ( self) -> List[str]:
if len(self.models) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''')
return self.models
@property
def UpperCAmelCase__ ( self) -> Union[str, Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''')
return False
else:
return True | 34 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
SCREAMING_SNAKE_CASE_ = sys.version_info >= (3, 10)
def __snake_case ( _lowercase=None ,_lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=_lowercase )
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
A_ = 42
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = False
A_ = True
A_ = None
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''titi'''
A_ = '''toto'''
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''titi'''
A_ = '''toto'''
A_ = 42
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = "toto"
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = BasicEnum(self.foo)
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = "toto"
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = MixedTypeEnum(self.foo)
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = None
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''help message'''} )
A_ = None
A_ = list_field(default=[] )
A_ = list_field(default=[] )
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = list_field(default=[] )
A_ = list_field(default=[1, 2, 3] )
A_ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
A_ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = field()
A_ = field()
A_ = field()
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = BasicEnum(self.required_enum)
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = field()
A_ = None
A_ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
A_ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = False
A_ = True
A_ = None
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = None
A_ = field(default=lowerCamelCase_ , metadata={'''help''': '''help message'''} )
A_ = None
A_ = list_field(default=[] )
A_ = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
UpperCamelCase = {k: v for k, v in vars(lowerCamelCase_).items() if k != '''container'''}
UpperCamelCase = {k: v for k, v in vars(lowerCamelCase_).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , lowerCamelCase_) and yy.get('''choices''' , lowerCamelCase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](lowerCamelCase_) , yy['''type'''](lowerCamelCase_))
del xx["type"], yy["type"]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
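    # Hedged note: two independently built parsers never share argparse action
    # objects, so the helper above compares actions field by field; `choices`
    # entries are first passed through each action's `type` callable so mixed
    # str/int choices compare by converted value rather than by identity.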
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCamelCase_ , required=lowerCamelCase_)
expected.add_argument('''--bar''' , type=lowerCamelCase_ , required=lowerCamelCase_)
expected.add_argument('''--baz''' , type=lowerCamelCase_ , required=lowerCamelCase_)
expected.add_argument('''--flag''' , type=lowerCamelCase_ , default=lowerCamelCase_ , const=lowerCamelCase_ , nargs='''?''')
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((UpperCamelCase) , ) = parser.parse_args_into_dataclasses(lowerCamelCase_ , look_for_args_file=lowerCamelCase_)
self.assertFalse(example.flag)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=lowerCamelCase_)
expected.add_argument('''--baz''' , default='''toto''' , type=lowerCamelCase_ , help='''help message''')
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCamelCase_ , default=lowerCamelCase_ , const=lowerCamelCase_ , nargs='''?''')
expected.add_argument('''--baz''' , type=lowerCamelCase_ , default=lowerCamelCase_ , const=lowerCamelCase_ , nargs='''?''')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowerCamelCase_ , dest='''baz''')
expected.add_argument('''--opt''' , type=lowerCamelCase_ , default=lowerCamelCase_)
UpperCamelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase_)
for dataclass_type in dataclass_types:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = parser.parse_args([])
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_))
UpperCamelCase = parser.parse_args(['''--foo''', '''--no_baz'''])
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_))
UpperCamelCase = parser.parse_args(['''--foo''', '''--baz'''])
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_))
UpperCamelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''])
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_))
UpperCamelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''])
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_))
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2]) , )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = parser.parse_args([])
self.assertEqual(args.foo , '''toto''')
UpperCamelCase = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''])
self.assertEqual(args.foo , '''titi''')
UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
UpperCamelCase = parser.parse_args(['''--foo''', '''42'''])
self.assertEqual(args.foo , 4_2)
UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def UpperCAmelCase__ ( self) -> List[Any]:
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = "toto"
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2]) , )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = parser.parse_args([])
self.assertEqual(args.foo , '''toto''')
UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''])
self.assertEqual(args.foo , '''titi''')
UpperCamelCase = parser.parse_args(['''--foo''', '''42'''])
self.assertEqual(args.foo , 4_2)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowerCamelCase_)
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowerCamelCase_)
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowerCamelCase_)
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowerCamelCase_)
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = parser.parse_args([])
self.assertEqual(
lowerCamelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3]) , )
UpperCamelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split())
self.assertEqual(lowerCamelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7]))
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowerCamelCase_ , type=lowerCamelCase_)
expected.add_argument('''--bar''' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='''help message''')
expected.add_argument('''--baz''' , default=lowerCamelCase_ , type=lowerCamelCase_)
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowerCamelCase_)
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowerCamelCase_)
UpperCamelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase_)
for dataclass_type in dataclass_types:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = parser.parse_args([])
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , bar=lowerCamelCase_ , baz=lowerCamelCase_ , ces=[] , des=[]))
UpperCamelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split())
self.assertEqual(lowerCamelCase_ , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3]))
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowerCamelCase_ , required=lowerCamelCase_)
expected.add_argument('''--required_str''' , type=lowerCamelCase_ , required=lowerCamelCase_)
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto''']) , choices=['''titi''', '''toto'''] , required=lowerCamelCase_ , )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCamelCase_ , required=lowerCamelCase_)
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto''']) , choices=['''titi''', '''toto'''] , required=lowerCamelCase_ , )
expected.add_argument('''--opt''' , type=lowerCamelCase_ , default=lowerCamelCase_)
expected.add_argument('''--baz''' , default='''toto''' , type=lowerCamelCase_ , help='''help message''')
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowerCamelCase_)
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
UpperCamelCase = parser.parse_dict(lowerCamelCase_)[0]
UpperCamelCase = BasicExample(**lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(lowerCamelCase_ , parser.parse_dict , lowerCamelCase_ , allow_extra_keys=lowerCamelCase_)
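    # By default parse_dict rejects dict keys that map to no dataclass field;
    # the test above asserts that the stray 'extra' key raises when extra keys
    # are not explicitly allowed.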
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(lowerCamelCase_ , '''temp_json''')
os.mkdir(lowerCamelCase_)
with open(temp_local_path + '''.json''' , '''w+''') as f:
json.dump(lowerCamelCase_ , lowerCamelCase_)
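            # YAML is a superset of JSON, so parse_yaml_file can load the .json
            # file written just above.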
UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json'''))[0]
UpperCamelCase = BasicExample(**lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
UpperCamelCase = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(lowerCamelCase_ , '''temp_yaml''')
os.mkdir(lowerCamelCase_)
with open(temp_local_path + '''.yaml''' , '''w+''') as f:
yaml.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml'''))[0]
UpperCamelCase = BasicExample(**lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = HfArgumentParser(lowerCamelCase_)
self.assertIsNotNone(lowerCamelCase_) | 34 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
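# Worked example (illustrative): z_function('aabxaab') == [0, 1, 0, 0, 3, 1, 0];
# z[4] == 3 because the suffix starting at index 4 matches the 3-char prefix 'aab'.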
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
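# Caveat (hedged): because pattern and text are concatenated without a sentinel,
# periodic patterns can be overcounted (e.g. pattern 'aa' over text 'aa' yields 2);
# joining with a character absent from both strings, such as
# z_function(pattern + '\x00' + input_str), prevents matches that straddle the join.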
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = (UniPCMultistepScheduler,)
A_ = (('''num_inference_steps''', 25),)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> int:
UpperCamelCase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**lowerCamelCase_)
return config
def UpperCAmelCase__ ( self , lowerCamelCase_=0 , **lowerCamelCase_) -> int:
UpperCamelCase = dict(self.forward_default_kwargs)
UpperCamelCase = kwargs.pop('''num_inference_steps''' , lowerCamelCase_)
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config(**lowerCamelCase_)
UpperCamelCase = scheduler_class(**lowerCamelCase_)
scheduler.set_timesteps(lowerCamelCase_)
# copy over dummy past residuals
UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_)
UpperCamelCase = scheduler_class.from_pretrained(lowerCamelCase_)
new_scheduler.set_timesteps(lowerCamelCase_)
# copy over dummy past residuals
UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase , UpperCamelCase = sample, sample
for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1):
UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_).prev_sample
UpperCamelCase = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , lowerCamelCase_=0 , **lowerCamelCase_) -> Any:
UpperCamelCase = dict(self.forward_default_kwargs)
UpperCamelCase = kwargs.pop('''num_inference_steps''' , lowerCamelCase_)
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**lowerCamelCase_)
scheduler.set_timesteps(lowerCamelCase_)
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_)
UpperCamelCase = scheduler_class.from_pretrained(lowerCamelCase_)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_)
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_).prev_sample
UpperCamelCase = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , lowerCamelCase_=None , **lowerCamelCase_) -> List[Any]:
        if scheduler is None:
            UpperCamelCase = self.scheduler_classes[0]
            UpperCamelCase = self.get_scheduler_config(**lowerCamelCase_)
            UpperCamelCase = scheduler_class(**lowerCamelCase_)
UpperCamelCase = 1_0
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_)
for i, t in enumerate(scheduler.timesteps):
UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_).prev_sample
return sample
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = dict(self.forward_default_kwargs)
UpperCamelCase = kwargs.pop('''num_inference_steps''' , lowerCamelCase_)
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**lowerCamelCase_)
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase_ , '''set_timesteps'''):
scheduler.set_timesteps(lowerCamelCase_)
elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , '''set_timesteps'''):
UpperCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
UpperCamelCase = scheduler.timesteps[5]
UpperCamelCase = scheduler.timesteps[6]
UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_).prev_sample
UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def UpperCAmelCase__ ( self) -> int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCamelCase = UniPCMultistepScheduler(**self.get_scheduler_config())
UpperCamelCase = self.full_loop(scheduler=lowerCamelCase_)
UpperCamelCase = torch.mean(torch.abs(lowerCamelCase_))
assert abs(result_mean.item() - 0.2464) < 1e-3
UpperCamelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCamelCase = DEISMultistepScheduler.from_config(scheduler.config)
UpperCamelCase = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCamelCase = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCamelCase = self.full_loop(scheduler=lowerCamelCase_)
UpperCamelCase = torch.mean(torch.abs(lowerCamelCase_))
assert abs(result_mean.item() - 0.2464) < 1e-3
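    # The hops above work because all four solvers share a config schema, so
    # `from_config(scheduler.config)` can move between scheduler classes and
    # UniPC must still reproduce its reference mean afterwards.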
def UpperCAmelCase__ ( self) -> Optional[Any]:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Optional[Any]:
self.check_over_configs(thresholding=lowerCamelCase_)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , )
def UpperCAmelCase__ ( self) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Tuple:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , )
UpperCamelCase = self.full_loop(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , )
assert not torch.isnan(lowerCamelCase_).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self) -> Optional[Any]:
self.check_over_configs(lower_order_final=lowerCamelCase_)
self.check_over_configs(lower_order_final=lowerCamelCase_)
def UpperCAmelCase__ ( self) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=0)
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.full_loop()
UpperCamelCase = torch.mean(torch.abs(lowerCamelCase_))
assert abs(result_mean.item() - 0.2464) < 1e-3
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = self.full_loop(prediction_type='''v_prediction''')
UpperCamelCase = torch.mean(torch.abs(lowerCamelCase_))
assert abs(result_mean.item() - 0.1014) < 1e-3
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(thresholding=lowerCamelCase_ , dynamic_thresholding_ratio=0)
UpperCamelCase = scheduler_class(**lowerCamelCase_)
UpperCamelCase = 1_0
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase_)
for i, t in enumerate(scheduler.timesteps):
UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> str:
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config(**lowerCamelCase_)
UpperCamelCase = scheduler_class(**lowerCamelCase_)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps | 34 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls ,_lowercase ) and fp16_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
                UpperCamelCase = bnb.nn.Int8Params(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
                UpperCamelCase = bnb.nn.Params4bit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
            if fp16_statistics is not None:
                setattr(module.weight ,'''SCB''' ,fp16_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        UpperCamelCase = bnb.nn.Linear8bitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
                            UpperCamelCase = bnb.nn.Linear4bit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 1 |
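# Illustrative usage sketch (not part of the original module). Assuming the
# public `transformers` names that the deprecation warnings above point to
# (`replace_with_bnb_linear`) plus `get_keys_to_not_convert` for the last
# helper, int8 quantization of a model would look roughly like:
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     keep_in_fp32 = get_keys_to_not_convert(model)      # e.g. ["lm_head"]
#     model = replace_with_bnb_linear(
#         model,
#         modules_to_not_convert=keep_in_fp32,
#         quantization_config=quantization_config,
#     )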
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z) | 34 | 1 |
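# Added illustration (sketch only, using the `_in_place_quick_sort` name that
# the driver above calls): the same sort on a small fixed list makes the
# in-place behaviour easy to see without the NumPy setup.
#
#     data = [9, 4, 7, 1, 3]
#     comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#     print(data)         # [1, 3, 4, 7, 9]
#     print(comparisons)  # number of comparisons made while sorting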
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=3_0 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0 , lowerCamelCase_=0.02 , lowerCamelCase_=None , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 1
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) -> List[Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = ViTMSNModel(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = ViTMSNForImageClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_)
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = ViTMSNForImageClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
A_ = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = ViTMSNModelTester(self)
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=3_7)
def UpperCAmelCase__ ( self) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''')
def UpperCAmelCase__ ( self) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear))
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_)
UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_)
@slow
def UpperCAmelCase__ ( self) -> List[Any]:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ViTMSNModel.from_pretrained(lowerCamelCase_)
self.assertIsNotNone(lowerCamelCase_)
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__ ( self) -> List[str]:
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''') if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self) -> str:
torch.manual_seed(2)
UpperCamelCase = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''').to(lowerCamelCase_)
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors='''pt''').to(lowerCamelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase = model(**lowerCamelCase_)
# verify the logits
UpperCamelCase = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowerCamelCase_)
UpperCamelCase = torch.tensor([-0.0803, -0.4454, -0.2375]).to(lowerCamelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4)) | 34 |
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
import random
class snake_case_ :
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> tuple[list[int], list[int]]:
        UpperCamelCase = [ord(i) for i in text]
UpperCamelCase = []
UpperCamelCase = []
for i in plain:
UpperCamelCase = random.randint(1 , 3_0_0)
UpperCamelCase = (i + k) * k
cipher.append(lowerCamelCase_)
key.append(lowerCamelCase_)
return cipher, key
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = []
for i in range(len(lowerCamelCase_)):
UpperCamelCase = int((cipher[i] - (key[i]) ** 2) / key[i])
plain.append(chr(lowerCamelCase_))
return "".join(lowerCamelCase_)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k)) | 34 |
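# Worked check (added for illustration): for a character code p and key k the
# cipher stores (p + k) * k = p*k + k**2, so (cipher - k**2) / k recovers p,
# which is exactly the inverse that `decrypt` computes above.
p_demo, k_demo = ord("A"), 7
c_demo = (p_demo + k_demo) * k_demo
assert (c_demo - k_demo**2) // k_demo == p_demo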
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 1 |
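# Example invocation (illustrative; the script filename and output directory
# are hypothetical, the URL is the parser's default above):
#
#     python convert_vit_mae_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#         --pytorch_dump_folder_path ./vit-mae-base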
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
SCREAMING_SNAKE_CASE_ = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = True
while ask_again:
UpperCamelCase = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def __snake_case ( _lowercase ,_lowercase=[] ,_lowercase=None ,_lowercase=0 ):
"""simple docstring"""
UpperCamelCase = BulletMenu(_lowercase ,_lowercase )
UpperCamelCase = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def __snake_case ( _lowercase ):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class snake_case_ ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase = super()._format_usage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = usage.replace('''<command> [<args>] ''' , '''''')
return usage | 34 |
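# Quick illustration (added; assuming the `accelerate` helper names
# `_convert_yes_no_to_bool`, `_convert_mixed_precision` and
# `_convert_dynamo_backend` for the converters defined above):
#
#     _convert_yes_no_to_bool("Yes")   # True
#     _convert_mixed_precision(1)      # PrecisionType("fp16")
#     _convert_dynamo_backend(2)       # DynamoBackend("INDUCTOR").value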
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 1 |
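# Usage sketch outside the unittest harness (added for illustration;
# `train_one_epoch` is a hypothetical helper): the decorator retries the
# wrapped function with a halved batch size whenever it raises a CUDA
# out-of-memory error, which is exactly the behaviour the tests above exercise.
#
#     from accelerate.utils.memory import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         train_one_epoch(batch_size)   # may raise "CUDA out of memory."
#
#     train()   # runs with the largest batch size that fits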
"""simple docstring"""
import numpy as np
def __snake_case ( _lowercase ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 |
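# Quick check (added for illustration, recomputing the same formula inline):
# the logistic function maps 0 to 0.5 and saturates toward 0 and 1 at the tails.
_demo = 1 / (1 + np.exp(-np.array([-6.0, 0.0, 6.0])))
assert abs(_demo[1] - 0.5) < 1e-12 and _demo[0] < 0.01 and _demo[2] > 0.99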
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def __snake_case ( _lowercase ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(_lowercase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2
while True:
if is_prime(_lowercase ):
yield num
num += 1
def __snake_case ( _lowercase = 200_0000 ):
"""simple docstring"""
    return sum(takewhile(lambda x: x < n ,prime_generator() ) )
if __name__ == "__main__":
print(f'{solution() = }') | 34 |
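# Small worked example (added for illustration), using the same
# takewhile-over-generator pattern as `solution` above: the primes below 10
# are 2, 3, 5, 7 and sum to 17.
#
#     list(takewhile(lambda x: x < 10, prime_generator()))   # [2, 3, 5, 7]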
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
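# Example invocation (illustrative; the script filename and both paths are
# hypothetical placeholders):
#
#     python convert_bigbird_pegasus_tf_to_pytorch.py \
#         --tf_ckpt_path /path/to/bigbird_pegasus_ckpt \
#         --save_dir ./bigbird-pegasus-converted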
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
SCREAMING_SNAKE_CASE_ = False
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self , lowerCamelCase_=3_2) -> Dict:
set_seed(0)
UpperCamelCase = UNetaDModel(sample_size=lowerCamelCase_ , in_channels=3 , out_channels=3)
UpperCamelCase = torch.optim.SGD(model.parameters() , lr=0.0001)
return model, optimizer
@slow
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
UpperCamelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=lowerCamelCase_ , )
UpperCamelCase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=lowerCamelCase_ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
UpperCamelCase = [torch.randn((4, 3, 3_2, 3_2)).clip(-1 , 1).to(lowerCamelCase_) for _ in range(4)]
UpperCamelCase = [torch.randn((4, 3, 3_2, 3_2)).to(lowerCamelCase_) for _ in range(4)]
UpperCamelCase = [torch.randint(0 , 1_0_0_0 , (4,)).long().to(lowerCamelCase_) for _ in range(4)]
# train with a DDPM scheduler
UpperCamelCase , UpperCamelCase = self.get_model_optimizer(resolution=3_2)
model.train().to(lowerCamelCase_)
for i in range(4):
optimizer.zero_grad()
UpperCamelCase = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
UpperCamelCase = model(lowerCamelCase_ , timesteps[i]).sample
UpperCamelCase = torch.nn.functional.mse_loss(lowerCamelCase_ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
UpperCamelCase , UpperCamelCase = self.get_model_optimizer(resolution=3_2)
model.train().to(lowerCamelCase_)
for i in range(4):
optimizer.zero_grad()
UpperCamelCase = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
UpperCamelCase = model(lowerCamelCase_ , timesteps[i]).sample
UpperCamelCase = torch.nn.functional.mse_loss(lowerCamelCase_ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-5))
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-5)) | 34 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
UpperCamelCase = cha + cha
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
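# Example call (added for illustration), matching the commented-out usage
# above; the three printed figures are the single-character entropy, the
# two-character entropy, and their difference:
#
#     calculate_prob("the quick brown fox jumps over the lazy dog ")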
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class snake_case_ :
"""simple docstring"""
def __init__( self) -> None:
UpperCamelCase = [2, 1, 2, -1]
UpperCamelCase = [1, 2, 3, 4]
def UpperCAmelCase__ ( self) -> list[float]:
UpperCamelCase = len(self.first_signal)
UpperCamelCase = len(self.second_signal)
UpperCamelCase = max(lowerCamelCase_ , lowerCamelCase_)
# create a zero matrix of max_length x max_length
UpperCamelCase = [[0] * max_length for i in range(lowerCamelCase_)]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCamelCase_):
UpperCamelCase = deque(self.second_signal)
            rotated_signal.rotate(i)
for j, item in enumerate(lowerCamelCase_):
matrix[i][j] += item
# multiply the matrix with the first signal
UpperCamelCase = np.matmul(np.transpose(lowerCamelCase_) , np.transpose(self.first_signal))
# rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod() | 34 |
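# Cross-check (added for illustration): pointwise multiplication of DFTs
# equals circular convolution in time, so the frequency-domain result below
# must match the matrix-based computation above for the same two signals.
_a = np.array([2, 1, 2, -1], dtype=float)
_b = np.array([1, 2, 3, 4], dtype=float)
_circ = np.real(np.fft.ifft(np.fft.fft(_a) * np.fft.fft(_b)))
assert [round(v, 2) for v in _circ] == [10.0, 10.0, 6.0, 14.0]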
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
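# The tester above depends on two input helpers imported further up. Minimal
# stand-ins are sketched below so the pattern is visible in isolation; shapes
# and dtypes are assumptions inferred from how the helpers are used, not taken
# from the library.
import numpy as np

def ids_tensor_sketch(shape, vocab_size, rng=None):
    # random token ids in [0, vocab_size) for a (batch, seq_length) input
    rng = rng or np.random.default_rng(0)
    return rng.integers(0, vocab_size, size=shape, dtype=np.int32)

def random_attention_mask_sketch(shape, rng=None):
    # random 0/1 mask; force the last position to 1 so no row is fully masked
    mask = ids_tensor_sketch(shape, vocab_size=2, rng=rng)
    mask[:, -1] = 1
    return mask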
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/resolve/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 |
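# A self-contained sketch of the greedy longest-match strategy the
# WordpieceTokenizer above implements: try the longest substring first, back
# off one character at a time, and emit the unknown token when nothing
# matches. The toy vocabulary in the asserts is made up for illustration.
def greedy_longest_match(text, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(text):
        end, piece = len(text), None
        while start < end:
            if text[start:end] in vocab:
                piece = text[start:end]
                break
            end -= 1
        if piece is None:
            tokens.append(unk)  # no prefix matched at this position
            start += 1
        else:
            tokens.append(piece)
            start = end
    return tokens

assert greedy_longest_match("unhappy", {"un", "happy"}) == ["un", "happy"]
assert greedy_longest_match("unhappyx", {"un", "happy"}) == ["un", "happy", "<unk>"]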
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 | 1 |
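# A minimal sketch of the postprocessing step above: softmax the per-label
# logits produced by an image-text model and sort the labels by score. The
# logits below are invented for the example.
import numpy as np

def rank_labels(logits, candidate_labels):
    exp = np.exp(logits - np.max(logits))  # numerically stable softmax
    probs = exp / exp.sum()
    return sorted(
        ({"score": float(s), "label": l} for s, l in zip(probs, candidate_labels)),
        key=lambda d: -d["score"],
    )

print(rank_labels(np.array([2.0, 0.5, -1.0]), ["cat", "dog", "car"]))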
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 |
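# A rough sketch of the behaviour the tests above exercise: a decorator that
# retries a training function with a halved batch size whenever it raises an
# out-of-memory error. This only illustrates the contract; it is not
# Accelerate's implementation.
import functools

def shrink_on_oom(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" in str(err).lower():
                        batch_size //= 2  # retry with a smaller batch
                    else:
                        raise
        return wrapper
    return decorator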
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 1 |
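# The tests above rely on one pattern worth isolating: a seeded torch.Generator
# makes sampling reproducible, which is what lets output slices be compared
# against stored reference values.
import torch

gen_a = torch.Generator().manual_seed(0)
gen_b = torch.Generator().manual_seed(0)
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))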
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
SCREAMING_SNAKE_CASE_ = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 'hopper-medium-v2'
SCREAMING_SNAKE_CASE_ = gym.make(env_name)
SCREAMING_SNAKE_CASE_ = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
SCREAMING_SNAKE_CASE_ = env.reset()
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1000
SCREAMING_SNAKE_CASE_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
SCREAMING_SNAKE_CASE_ = pipeline(obs, planning_horizon=32)
# execute action in environment
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = env.step(denorm_actions)
SCREAMING_SNAKE_CASE_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
SCREAMING_SNAKE_CASE_ = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}') | 34 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.1'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta | 34 | 1 |
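# A self-contained sketch of the offline-simulation trick used above: patch
# requests.Session.send so every request fails before any network I/O happens,
# then check that the code under test copes with it.
from unittest.mock import patch

import requests

def fake_send(self, request, **kwargs):
    raise requests.ConnectionError("Offline mode is enabled.", request=request)

with patch("requests.Session.send", fake_send):
    try:
        requests.get("https://huggingface.co")
    except requests.ConnectionError as err:
        print("simulated offline:", err)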
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 34 |
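# A minimal sketch of the lazy-import pattern this __init__ relies on: module
# attributes are resolved on first access (PEP 562) instead of at import time.
# Placed in a package's __init__.py, a stripped-down version looks roughly like
# this; it is a simplification, not the actual _LazyModule.
import importlib

_import_structure_sketch = {"json": ["dumps", "loads"]}
_attr_to_module = {
    attr: mod for mod, attrs in _import_structure_sketch.items() for attr in attrs
}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")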
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 1 |
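# A quick cross-check of the implementation above against the built-in sort. On
# [4, 3, 5, 1, 2] the first strand pulled is [4, 5] (each element greater than
# the previous one), which is then merged into the growing solution.
import random

sample = [random.randint(0, 100) for _ in range(50)]
assert strand_sort(list(sample)) == sorted(sample)
assert strand_sort(list(sample), reverse=True) == sorted(sample, reverse=True)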
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
SCREAMING_SNAKE_CASE_ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = (
list(range(ord('''!''' ) ,ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) ,ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) ,ord('''ÿ''' ) + 1 ) )
)
UpperCamelCase = bs[:]
UpperCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowercase )
cs.append(2**8 + n )
n += 1
UpperCamelCase = [chr(_lowercase ) for n in cs]
return dict(zip(_lowercase ,_lowercase ) )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
return pairs
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="replace" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_=False , **lowerCamelCase_ , ) -> List[Any]:
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else bos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else eos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else sep_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else cls_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else unk_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''') as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_)
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = errors # how to handle errors in decoding
UpperCamelCase = bytes_to_unicode()
UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding='''utf-8''') as merges_handle:
UpperCamelCase = merges_handle.read().split('''\n''')[1:-1]
UpperCamelCase = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = {}
UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self) -> Tuple:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Tuple:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(lowerCamelCase_)
UpperCamelCase = get_pairs(lowerCamelCase_)
if not pairs:
return token
while True:
UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_: self.bpe_ranks.get(lowerCamelCase_ , float('''inf''')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(lowerCamelCase_):
try:
UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase = j
if word[i] == first and i < len(lowerCamelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase = tuple(lowerCamelCase_)
UpperCamelCase = new_word
if len(lowerCamelCase_) == 1:
break
else:
UpperCamelCase = get_pairs(lowerCamelCase_)
UpperCamelCase = ''' '''.join(lowerCamelCase_)
UpperCamelCase = word
return word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
UpperCamelCase = []
for token in re.findall(self.pat , lowerCamelCase_):
UpperCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_).split(''' '''))
return bpe_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.decoder.get(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''' , errors=self.errors)
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_) + '''\n''')
UpperCamelCase = 0
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
writer.write('''#version: 0.2\n''')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''')
UpperCamelCase = token_index
writer.write(''' '''.join(lowerCamelCase_) + '''\n''')
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_)) + [1]
return [1] + ([0] * len(lowerCamelCase_)) + [1, 1] + ([0] * len(lowerCamelCase_)) + [1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=False , **lowerCamelCase_) -> str:
UpperCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_) > 0 and not text[0].isspace()):
UpperCamelCase = ''' ''' + text
return (text, kwargs)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> str:
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[int]:
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text)
else:
# Generated responses should contain them already.
inputs.append(lowerCamelCase_)
UpperCamelCase = ''' '''.join(lowerCamelCase_)
UpperCamelCase = self.encode(lowerCamelCase_)
if len(lowerCamelCase_) > self.model_max_length:
UpperCamelCase = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids | 34 |
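# A toy walk-through of the BPE merge loop implemented above: start from single
# characters and repeatedly merge the adjacent pair with the lowest rank. The
# two-rule merge table is invented for the example.
toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}

def toy_bpe(word):
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda p: toy_ranks.get(p, float("inf")))
        if best not in toy_ranks:
            break  # no mergeable pair left
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

assert toy_bpe("lower") == ("low", "e", "r")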
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 1 |
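# Cross-checking the docstring example by computing Pearson's r directly from
# its definition, r = cov(x, y) / (std(x) * std(y)):
import numpy as np

x = np.array([10, 9, 2.5, 6, 4])
y = np.array([1, 2, 3, 4, 5])
r = np.mean((x - x.mean()) * (y - y.mean())) / (x.std() * y.std())
print(round(float(r), 2))  # -0.74, matching the example above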
"""simple docstring"""
from __future__ import annotations
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> None:
UpperCamelCase = data
UpperCamelCase = None
UpperCamelCase = None
def __snake_case ( _lowercase ): # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __snake_case ( _lowercase ):
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def __snake_case ( _lowercase ):
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def __snake_case ( ): # Main function for testing.
"""simple docstring"""
UpperCamelCase = Node(1 )
UpperCamelCase = Node(2 )
UpperCamelCase = Node(3 )
UpperCamelCase = Node(4 )
UpperCamelCase = Node(5 )
UpperCamelCase = Node(6 )
UpperCamelCase = Node(7 )
UpperCamelCase = Node(8 )
UpperCamelCase = Node(9 )
print(is_full_binary_tree(_lowercase ) )
print(depth_of_tree(_lowercase ) )
print('''Tree is: ''' )
display(_lowercase )
if __name__ == "__main__":
main() | 34 |
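# A handy invariant behind the check above: in a full binary tree every
# internal node has exactly two children, so node_count == 2 * leaf_count - 1.
# The two counters below are small additions for demonstration.
def count_nodes(tree):
    return 1 + count_nodes(tree.left) + count_nodes(tree.right) if tree else 0

def count_leaves(tree):
    if not tree:
        return 0
    if not tree.left and not tree.right:
        return 1
    return count_leaves(tree.left) + count_leaves(tree.right)

full = Node(1)
full.left, full.right = Node(2), Node(3)
assert is_full_binary_tree(full)
assert count_nodes(full) == 2 * count_leaves(full) - 1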
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
        # `_convert_nargs_to_dict` should parse the nargs-style training script arguments into values of the expected Python types.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 1 |
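# A rough sketch of the parsing behaviour the test above pins down: turning a
# "--key value / --flag" argument list into a dict of typed values. This only
# illustrates the expected contract; it is not Accelerate's implementation.
def parse_nargs_sketch(args):
    result, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            raw, i = args[i + 1], i + 2
        else:
            raw, i = "True", i + 1  # bare flag, e.g. --do_predict
        for cast in (int, float):
            try:
                result[key] = cast(raw)
                break
            except ValueError:
                continue
        else:
            result[key] = {"True": True, "False": False}.get(raw, raw)
    return result

parsed = parse_nargs_sketch(["--do_train", "False", "--epochs", "3", "--max_steps", "50.5"])
assert parsed == {"do_train": False, "epochs": 3, "max_steps": 50.5}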
"""simple docstring"""
def __snake_case ( _lowercase ): # noqa: E741
"""simple docstring"""
UpperCamelCase = len(_lowercase )
UpperCamelCase = 0
UpperCamelCase = [0] * n
UpperCamelCase = [False] * n
UpperCamelCase = [False] * n
def dfs(_lowercase ,_lowercase ,_lowercase ,_lowercase ):
if parent == root:
out_edge_count += 1
UpperCamelCase = True
UpperCamelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
UpperCamelCase = dfs(_lowercase ,_lowercase ,_lowercase ,_lowercase )
UpperCamelCase = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
UpperCamelCase = True
# AP found via cycle
if at == low[to]:
UpperCamelCase = True
else:
UpperCamelCase = min(low[at] ,_lowercase )
return out_edge_count
for i in range(_lowercase ):
if not visited[i]:
UpperCamelCase = 0
UpperCamelCase = dfs(_lowercase ,_lowercase ,-1 ,_lowercase )
UpperCamelCase = out_edge_count > 1
for x in range(len(_lowercase ) ):
if is_art[x] is True:
print(_lowercase )
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 34 |
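A de-obfuscated restatement of the routine above. Note the row seeds low[at] with the vertex index itself as a stand-in for DFS discovery time, which is not a faithful discovery time in general; this sketch keeps an explicit timer, the textbook-safe variant, and returns the articulation points instead of printing inside the loop:

def articulation_points(graph):
    """Return the articulation points of an undirected graph (adjacency dict)."""
    n = len(graph)
    visited = [False] * n
    disc = [0] * n   # DFS discovery time of each vertex
    low = [0] * n    # lowest discovery time reachable from the vertex's subtree
    is_art = [False] * n
    timer = 0

    def dfs(at, parent):
        nonlocal timer
        visited[at] = True
        disc[at] = low[at] = timer
        timer += 1
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], disc[to])  # back edge
            else:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                children += 1
                # Non-root vertex: AP if no back edge from `to`'s subtree
                # climbs strictly above `at`.
                if parent != -1 and low[to] >= disc[at]:
                    is_art[at] = True
        if parent == -1 and children > 1:
            is_art[at] = True  # the root is an AP only with more than one DFS child

    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return [v for v in range(n) if is_art[v]]

print(articulation_points(data))  # [2, 3, 5] for the adjacency list above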
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 | 1 |
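A quick end-to-end check of the metric's core loop using jiwer directly, mirroring the per-pair accumulation in the method above. `compute_measures` is the jiwer 2.x API and is renamed in jiwer 3.x, so treat this as a version-dependent sketch; the transform matches the newer branch defined above:

import jiwer
import jiwer.transforms as tr

cer_transform = tr.Compose(
    [
        tr.RemoveMultipleSpaces(),
        tr.Strip(),
        tr.ReduceToSingleSentence(""),
        tr.ReduceToListOfListOfChars(),
    ]
)

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect, total = 0, 0
for pred, ref in zip(predictions, references):
    m = jiwer.compute_measures(
        ref, pred, truth_transform=cer_transform, hypothesis_transform=cer_transform
    )
    incorrect += m["substitutions"] + m["deletions"] + m["insertions"]
    total += m["substitutions"] + m["deletions"] + m["hits"]

print(incorrect / total)  # ~0.3415, matching the docstring example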
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , lowerCamelCase_ , ) | 34 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        # To avoid mixing byte-level and unicode for byte-level BPE,
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 1 |
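A typical end-user flow for the tokenizer above, via its public transformers name (assumes sentencepiece is installed and network access to fetch the pretrained model):

from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
enc = tok("Hello world", "How are you?")
# Pair layout built above: A <sep> B <sep> <cls>, with segment ids 0s, 1s, then 2
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])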
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = BertJapaneseTokenizer
A_ = False
A_ = True
def UpperCAmelCase__ ( self) -> Optional[Any]:
super().setUp()
UpperCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[Any]:
UpperCamelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
UpperCamelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
UpperCamelCase , UpperCamelCase = self.get_input_output_texts(lowerCamelCase_)
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_)
return text, ids
def UpperCAmelCase__ ( self) -> str:
pass # TODO add if relevant
def UpperCAmelCase__ ( self) -> List[str]:
pass # TODO add if relevant
def UpperCAmelCase__ ( self) -> Optional[int]:
pass # TODO add if relevant
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.tokenizer_class(self.vocab_file)
UpperCamelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''')
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''')
self.assertIsNotNone(lowerCamelCase_)
UpperCamelCase = '''こんにちは、世界。\nこんばんは、世界。'''
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
UpperCamelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(lowerCamelCase_ , '''wb''') as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_)
with open(lowerCamelCase_ , '''rb''') as handle:
UpperCamelCase = pickle.load(lowerCamelCase_)
UpperCamelCase = tokenizer_new.tokenize(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = MecabTokenizer(mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def UpperCAmelCase__ ( self) -> int:
try:
UpperCamelCase = MecabTokenizer(mecab_dic='''unidic_lite''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def UpperCAmelCase__ ( self) -> Optional[Any]:
try:
UpperCamelCase = MecabTokenizer(mecab_dic='''unidic''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = MecabTokenizer(do_lower_case=lowerCamelCase_ , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def UpperCAmelCase__ ( self) -> int:
try:
UpperCamelCase = MecabTokenizer(
do_lower_case=lowerCamelCase_ , normalize_text=lowerCamelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''')
except RuntimeError:
            # If the dictionary doesn't exist on the system, the code above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = MecabTokenizer(normalize_text=lowerCamelCase_ , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''')
self.assertIsNotNone(lowerCamelCase_)
UpperCamelCase = '''こんにちは、世界。\nこんばんは、世界。'''
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
UpperCamelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(lowerCamelCase_ , '''wb''') as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_)
with open(lowerCamelCase_ , '''rb''') as handle:
UpperCamelCase = pickle.load(lowerCamelCase_)
UpperCamelCase = tokenizer_new.tokenize(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
@require_sudachi
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = SudachiTokenizer(sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国''', '''人''', '''参政''', '''権'''])
@require_sudachi
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人''', '''参政権'''])
@require_sudachi
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人参政権'''])
@require_sudachi
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = SudachiTokenizer(do_lower_case=lowerCamelCase_ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = SudachiTokenizer(normalize_text=lowerCamelCase_ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = SudachiTokenizer(trim_whitespace=lowerCamelCase_ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''')
self.assertIsNotNone(lowerCamelCase_)
UpperCamelCase = '''こんにちは、世界。\nこんばんは、世界。'''
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
UpperCamelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(lowerCamelCase_ , '''wb''') as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_)
with open(lowerCamelCase_ , '''rb''') as handle:
UpperCamelCase = pickle.load(lowerCamelCase_)
UpperCamelCase = tokenizer_new.tokenize(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
@require_jumanpp
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = JumanppTokenizer(do_lower_case=lowerCamelCase_)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = JumanppTokenizer(normalize_text=lowerCamelCase_)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = JumanppTokenizer(trim_whitespace=lowerCamelCase_)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''') , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
UpperCamelCase = {}
for i, token in enumerate(lowerCamelCase_):
UpperCamelCase = i
UpperCamelCase = WordpieceTokenizer(vocab=lowerCamelCase_ , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こんにちは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは''') , ['''こん''', '''##ばんは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''') , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''])
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''')
UpperCamelCase = tokenizer.subword_tokenizer
UpperCamelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''')
self.assertListEqual(lowerCamelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''])
UpperCamelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''')
self.assertListEqual(lowerCamelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''])
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''')
UpperCamelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_)
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = BertJapaneseTokenizer
A_ = False
def UpperCAmelCase__ ( self) -> str:
super().setUp()
UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> str:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
UpperCamelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def UpperCAmelCase__ ( self) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase__ ( self) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase__ ( self) -> str:
pass # TODO add if relevant
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''')
UpperCamelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''')
self.assertListEqual(
lowerCamelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
UpperCamelCase = {}
for i, token in enumerate(lowerCamelCase_):
UpperCamelCase = i
UpperCamelCase = CharacterTokenizer(vocab=lowerCamelCase_ , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''])
self.assertListEqual(tokenizer.tokenize('''こんにちほ''') , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''')
UpperCamelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_)
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = '''cl-tohoku/bert-base-japanese'''
UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''') as cm:
BertTokenizer.from_pretrained(lowerCamelCase_)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
UpperCamelCase = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''') as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase_)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''')) | 34 |
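For reference, the end-user call these tests exercise (needs fugashi/ipadic for the MeCab word tokenizer and network access for the vocab):

from transformers import BertJapaneseTokenizer

tok = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
# MeCab splits into words first, then WordPiece splits those into subwords
print(tok.tokenize("こんにちは、世界。"))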
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 1 |
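The inner tokenizer above performs greedy longest-prefix matching against a plain dict vocabulary. A self-contained restatement of that loop with de-obfuscated names and a toy vocab:

def greedy_longest_match(text, vocab, unk="<unk>", max_chars=200):
    chars = list(text)
    if len(chars) > max_chars:
        return [unk]
    start, out = 0, []
    while start < len(chars):
        end, cur = len(chars), None
        while start < end:                 # shrink the window until a vocab hit
            sub = "".join(chars[start:end])
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:                    # no prefix matched: emit unk, advance one char
            out.append(unk)
            start += 1
        else:
            out.append(cur)
            start = end
    return out

vocab = {"今天", "天气", "真", "好", "天"}
print(greedy_longest_match("今天天气真好", vocab))  # ['今天', '天气', '真', '好']
print(greedy_longest_match("明天天气真好", vocab))  # ['<unk>', '天', '天气', '真', '好']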
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SCREAMING_SNAKE_CASE_ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_ = logging.getLogger()
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase = parser.parse_args()
return args.f
def __snake_case ( _lowercase ,_lowercase="eval" ):
"""simple docstring"""
UpperCamelCase = os.path.join(_lowercase ,f'{split}_results.json' )
if os.path.exists(_lowercase ):
with open(_lowercase ,'''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f'can\'t find {path}' )
SCREAMING_SNAKE_CASE_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_):
run_flax_glue.main()
UpperCamelCase = get_results(lowerCamelCase_)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75)
@slow
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_):
run_clm_flax.main()
UpperCamelCase = get_results(lowerCamelCase_)
self.assertLess(result['''eval_perplexity'''] , 1_0_0)
@slow
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_):
run_summarization_flax.main()
UpperCamelCase = get_results(lowerCamelCase_ , split='''test''')
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0)
self.assertGreaterEqual(result['''test_rouge2'''] , 2)
self.assertGreaterEqual(result['''test_rougeL'''] , 7)
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7)
@slow
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_):
run_mlm_flax.main()
UpperCamelCase = get_results(lowerCamelCase_)
self.assertLess(result['''eval_perplexity'''] , 4_2)
@slow
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_):
run_ta_mlm_flax.main()
UpperCamelCase = get_results(lowerCamelCase_)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42)
@slow
def UpperCAmelCase__ ( self) -> Union[str, Any]:
        # With so little data, distributed training needs more epochs to reach a score on par with 0/1 GPU.
UpperCamelCase = 7 if get_gpu_count() > 1 else 2
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_):
run_flax_ner.main()
UpperCamelCase = get_results(lowerCamelCase_)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75)
self.assertGreaterEqual(result['''eval_f1'''] , 0.3)
@slow
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_):
run_qa.main()
UpperCamelCase = get_results(lowerCamelCase_)
self.assertGreaterEqual(result['''eval_f1'''] , 3_0)
self.assertGreaterEqual(result['''eval_exact'''] , 3_0) | 34 |
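The pattern every test above repeats: splice CLI flags into a fake sys.argv, then call the example script's main(). The renaming hides that the first argument to patch.object is `sys`; a minimal stand-alone illustration:

import sys
from unittest.mock import patch

testargs = "run_flax_glue.py --model_name_or_path distilbert-base-uncased".split()
with patch.object(sys, "argv", testargs):
    # inside this block, argparse in the script would see the fake argv
    assert sys.argv[1] == "--model_name_or_path"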
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 1 |
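Putting the pieces above together: per-parameter linear heads (the parameter-projection pattern), the squareplus domain map, and a Student-T whose df is kept above 2 as in the domain map. A stand-alone torch sketch; batch size and feature width are arbitrary assumptions:

import torch
from torch import nn
from torch.distributions import StudentT

def squareplus(x):
    # smooth positivity map used above in place of softplus
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

batch, in_features = 8, 32          # assumed shapes
feats = torch.randn(batch, in_features)

# One linear head per parameter, as with args_dim = {"df": 1, "loc": 1, "scale": 1}
proj = nn.ModuleList([nn.Linear(in_features, 1) for _ in range(3)])
raw_df, raw_loc, raw_scale = (p(feats) for p in proj)

df = (2.0 + squareplus(raw_df)).squeeze(-1)  # df > 2, so the variance exists
loc = raw_loc.squeeze(-1)
scale = squareplus(raw_scale).clamp_min(torch.finfo(raw_scale.dtype).eps).squeeze(-1)

dist = StudentT(df, loc, scale)
print(dist.sample().shape)                      # torch.Size([8])
print(dist.log_prob(torch.zeros(batch)).shape)  # torch.Size([8])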
"""simple docstring"""
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
SCREAMING_SNAKE_CASE_ = 'pytorch_model.bin'
SCREAMING_SNAKE_CASE_ = 'pytorch_model.bin.index.json'
SCREAMING_SNAKE_CASE_ = 'adapter_config.json'
SCREAMING_SNAKE_CASE_ = 'adapter_model.bin'
SCREAMING_SNAKE_CASE_ = 'adapter_model.safetensors'
SCREAMING_SNAKE_CASE_ = 'tf_model.h5'
SCREAMING_SNAKE_CASE_ = 'tf_model.h5.index.json'
SCREAMING_SNAKE_CASE_ = 'model.ckpt'
SCREAMING_SNAKE_CASE_ = 'flax_model.msgpack'
SCREAMING_SNAKE_CASE_ = 'flax_model.msgpack.index.json'
SCREAMING_SNAKE_CASE_ = 'model.safetensors'
SCREAMING_SNAKE_CASE_ = 'model.safetensors.index.json'
SCREAMING_SNAKE_CASE_ = 'config.json'
SCREAMING_SNAKE_CASE_ = 'preprocessor_config.json'
SCREAMING_SNAKE_CASE_ = FEATURE_EXTRACTOR_NAME
SCREAMING_SNAKE_CASE_ = 'generation_config.json'
SCREAMING_SNAKE_CASE_ = 'modelcard.json'
SCREAMING_SNAKE_CASE_ = '▁'
SCREAMING_SNAKE_CASE_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
SCREAMING_SNAKE_CASE_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
SCREAMING_SNAKE_CASE_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
SCREAMING_SNAKE_CASE_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __snake_case ( _lowercase ):
"""simple docstring"""
if version.parse(_lowercase ) < version.parse(_lowercase ):
if "dev" in min_version:
UpperCamelCase = (
'''This example requires a source install from HuggingFace Transformers (see '''
'''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
)
else:
UpperCamelCase = f'This example requires a minimum version of {min_version},'
error_message += f' but the version found is {__version__}.\n'
raise ImportError(
error_message
+ '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
'''versions of HuggingFace Transformers.''' ) | 34 |
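The version guard at the bottom (originally named check_min_version) leans on packaging's ordering, where a .devN pre-release sorts before its release:

from packaging import version

print(version.parse("4.29.2") < version.parse("4.30.0.dev0"))  # True
print(version.parse("4.30.0.dev0") < version.parse("4.30.0"))  # True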
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# Allow having multiple repository checkouts without needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts to run tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# Silence FutureWarnings in tests, since we often can't act on them until
# they become normal warnings - i.e. the tests still need to exercise the current functionality.
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform, )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform, )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
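# Hedged sketch (not part of the metric itself): checking the formula
# CER = (S + D + I) / (S + D + C) directly with jiwer, reusing the
# character-level transform defined above.
def _manual_cer(prediction, reference):
    measures = jiwer.compute_measures(
        reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform
    )
    errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    n = measures["substitutions"] + measures["deletions"] + measures["hits"]
    return errors / n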
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
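# Hedged demo (illustrative, not part of the shim): the deprecated alias still
# builds a working image processor but emits a FutureWarning first.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        MobileViTFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)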
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, min_seq_length=4_0_0, max_seq_length=2_0_0_0, spectrogram_length=2_0_4_8, feature_size=1_2_8, num_audio_channels=1, hop_length=5_1_2, chunk_length=3_0, sampling_rate=4_4_1_0_0, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""

    feature_extraction_class = TvltFeatureExtractor

    def setUp(self) -> None:
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self) -> None:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, '''spectrogram_length'''))
        self.assertTrue(hasattr(feature_extractor, '''feature_size'''))
        self.assertTrue(hasattr(feature_extractor, '''num_audio_channels'''))
        self.assertTrue(hasattr(feature_extractor, '''hop_length'''))
        self.assertTrue(hasattr(feature_extractor, '''chunk_length'''))
        self.assertTrue(hasattr(feature_extractor, '''sampling_rate'''))
    def test_feat_extract_from_and_save_pretrained(self) -> None:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('''mel_filters''')
        mel_second = dict_second.pop('''mel_filters''')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self) -> None:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''feat_extract.json''')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('''mel_filters''')
        mel_second = dict_second.pop('''mel_filters''')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self) -> None:
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(8_0_0, 1_4_0_0, 2_0_0)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors='''np''', sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='''np''', sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors='''np''', sampling_rate=4_4_1_0_0, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='''np''', sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''').select(range(num_samples))[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration(self) -> None:
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors='''pt''').audio_values

        self.assertEqual(audio_values.shape, (1, 1, 1_9_2, 1_2_8))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
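# Hedged standalone sketch (assumes the TVLT checkpoint defaults and an
# environment with torch/torchaudio; illustrative, not part of the test suite):
if __name__ == "__main__":
    waveform = np.asarray(floats_list((1, 8_0_0))[0])  # one 800-sample random waveform
    extractor = TvltFeatureExtractor()
    features = extractor(waveform, return_tensors='''np''', sampling_rate=4_4_1_0_0)
    print(features.audio_values.shape)  # (batch, channels, time_frames, feature_size)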
"""simple docstring"""
def z_function(input_str):
    """simple docstring"""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i, z_result, s):
    """simple docstring"""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern, input_str):
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than or equal to the length of the pattern
        # string, that index is the starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 1 |
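# Hedged usage sketch: z[i] is the length of the longest prefix of the string
# that also starts at position i, and find_pattern counts matches via the
# concatenation trick above.
if __name__ == "__main__":
    assert z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1]
    assert find_pattern("aba", "abacaba") == 2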
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=2_4, num_hidden_layers=2, num_attention_heads=6, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1_0_0_0, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name) -> bool:
        return True

    def setUp(self) -> None:
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=3_7)

    def test_config(self) -> None:
        self.config_tester.run_common_tests()

    def test_model(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self) -> None:
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    def test_inference_no_head(self) -> None:
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 7_6_8])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
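# Hedged illustration (not part of the test file): LiLT pairs every token with
# a bounding box [x0, y0, x1, y1]; the legality fix-up in
# prepare_config_and_inputs above just swaps coordinates so that x0 <= x1 and
# y0 <= y1. A minimal plain-Python equivalent:
def normalize_box(box):
    x0, y0, x1, y1 = box
    if x1 < x0:
        x0, x1 = x1, x0
    if y1 < y0:
        y0, y1 = y1, y0
    return [x0, y0, x1, y1]


if __name__ == "__main__":
    assert normalize_box([5, 6, 3, 4]) == [3, 4, 5, 6]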
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """simple docstring"""
    # Recurse down dotted tensor names like "encoder.layer.0.weight".
    if "." in tensor_name:
        splits = tensor_name.split('''.''')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device('''meta''') and device not in ["meta", torch.device('''meta''')] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, '''Params4bit''') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('''cpu''')
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''')) > version.parse(
                        '''0.37.2''')
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''')
            else:
                new_value = torch.tensor(value, device='''cpu''')

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, '''SCB''', fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """simple docstring"""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """simple docstring"""
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)

    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''')

    return model
def replace_8bit_linear(*args, **kwargs):
    """simple docstring"""
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''', FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    """simple docstring"""
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''', FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """simple docstring"""
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '''''')
        filtered_module_names.append(name)

    return filtered_module_names
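# Hedged usage sketch for the helpers above (model name and config values are
# illustrative assumptions, not prescriptions; users normally go through
# `from_pretrained(..., load_in_8bit=True)` rather than calling these directly):
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     keys_to_skip = get_keys_to_not_convert(model)  # e.g. ["lm_head"]
#     model = replace_with_bnb_linear(
#         model, modules_to_not_convert=keys_to_skip, quantization_config=quantization_config
#     )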
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    """simple docstring"""

    model_type = '''deberta-v2'''

    def __init__(self, vocab_size=1_2_8_1_0_0, hidden_size=1_5_3_6, num_hidden_layers=2_4, num_attention_heads=2_4, intermediate_size=6_1_4_4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs, ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('''|''')]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get('''pooler_hidden_size''', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)])
        else:
            return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 1_2

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False, framework=None, num_channels=3, image_width=4_0, image_height=4_0, tokenizer=None, ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
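# Hedged usage sketch (values are the defaults defined above; illustrative
# only): the backwards-compatibility branch splits a string pos_att_type on '|'.
if __name__ == "__main__":
    config = DebertaV2Config(relative_attention=True, pos_att_type="p2c|c2p")
    print(config.pos_att_type)  # ['p2c', 'c2p']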
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z)
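# Hedged aside: randomized quicksort performs about 2 * n * ln(n) comparisons
# in expectation (~921 for n = 100), so the count printed above should land in
# that neighborhood on most runs. Quick sanity check:
#
#     import math
#     print(2 * 100 * math.log(100))  # ~921.0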
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = DatasetDict(
        {
            '''train''': dataset['''train'''].select(train_idxs),
            '''validation''': dataset['''train'''].select(valid_idxs),
            '''test''': dataset['''validation'''],
        })

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    test_dataloader = DataLoader(
        tokenized_datasets['''test'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    """simple docstring"""
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('''glue''', '''mrpc''')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['''train'''].num_rows), datasets['''train''']['''label'''])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('''Average test metrics from all folds:''', test_metric)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    # New Code #
    parser.add_argument('''--num_folds''', type=int, default=3, help='''The number of splits to perform across the dataset''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 34 |
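# Hedged aside: what StratifiedKFold feeds the fold loop above. Each item
# yielded by .split() is a (train_idxs, valid_idxs) pair whose label
# proportions mirror the full dataset (toy values below are illustrative).
#
#     from sklearn.model_selection import StratifiedKFold
#     import numpy as np
#
#     y = np.array([0, 0, 0, 1, 1, 1])
#     for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(np.zeros(len(y)), y):
#         print(train_idxs, valid_idxs)  # every validation fold holds one 0 and one 1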
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
    """simple docstring"""

    def test_find_backend(self) -> None:
        no_backend = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
        self.assertIsNone(no_backend)

        simple_backend = find_backend(''' if not is_tokenizers_available():''')
        self.assertEqual(simple_backend, '''tokenizers''')

        backend_with_underscore = find_backend(''' if not is_tensorflow_text_available():''')
        self.assertEqual(backend_with_underscore, '''tensorflow_text''')

        double_backend = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
        self.assertEqual(double_backend, '''sentencepiece_and_tokenizers''')

        double_backend_with_underscore = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
        self.assertEqual(double_backend_with_underscore, '''sentencepiece_and_tensorflow_text''')

        triple_backend = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
        self.assertEqual(triple_backend, '''sentencepiece_and_tokenizers_and_vision''')

    def test_read_init(self) -> None:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''', objects)
        self.assertIn('''tensorflow_text''', objects)
        self.assertIn('''sentencepiece_and_tokenizers''', objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''', objects['''torch'''])
        self.assertIn('''TFBertModel''', objects['''tf'''])
        self.assertIn('''FlaxBertModel''', objects['''flax'''])
        self.assertIn('''TFBertTokenizer''', objects['''tensorflow_text'''])
        self.assertIn('''convert_slow_tokenizer''', objects['''sentencepiece_and_tokenizers'''])

    def test_create_dummy_object(self) -> None:
        dummy_constant = create_dummy_object('''CONSTANT''', "'torch'")
        self.assertEqual(dummy_constant, '''\nCONSTANT = None\n''')

        dummy_function = create_dummy_object('''function''', "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object('''FakeClass''', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self) -> None:
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''], expected_dummy_pytorch_file)
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# TODO Update this
SCREAMING_SNAKE_CASE_ = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''esm'''

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_0_2_6, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs, ) -> None:
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""

    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f' {self.sequence_state_dim} and {self.sequence_head_width}.')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.')

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')

        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """simple docstring"""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
) | 34 |
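# Hedged usage sketch: the __post_init__ hooks above promote nested dicts to
# their dataclass types (values are illustrative).
if __name__ == "__main__":
    config = EsmConfig(vocab_size=3_3, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}})
    assert isinstance(config.esmfold_config.trunk, TrunkConfig)
    print(config.to_dict()["esmfold_config"]["trunk"]["num_blocks"])  # 2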
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """simple docstring"""
    if "cls_token" in name:
        name = name.replace('''cls_token''', '''vit.embeddings.cls_token''')
    if "mask_token" in name:
        name = name.replace('''mask_token''', '''decoder.mask_token''')
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''', '''decoder.decoder_pos_embed''')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''', '''vit.embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''vit.embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''vit.embeddings.norm''')
    if "decoder_blocks" in name:
        name = name.replace('''decoder_blocks''', '''decoder.decoder_layers''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''vit.encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''', '''decoder.decoder_embed''')
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''', '''decoder.decoder_norm''')
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''', '''decoder.decoder_pred''')
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('''norm.weight''', '''vit.layernorm.weight''')
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('''norm.bias''', '''vit.layernorm.bias''')

    return name
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = '''decoder.decoder_layers.'''
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = '''vit.encoder.layer.'''
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """simple docstring"""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''model''']

    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors='''pt''')

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 1 |
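# A minimal invocation sketch for the conversion script above (the script filename
# is an assumption; the checkpoint URL is the argparse default):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base
#
# A URL containing "large" or "huge" switches the config branches above before the
# state dict is converted and verified against the hard-coded logits.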
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''input_features''', '''is_longer''']
def __init__( self , lowerCamelCase_=6_4 , lowerCamelCase_=4_8_0_0_0 , lowerCamelCase_=4_8_0 , lowerCamelCase_=1_0 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=0.0 , lowerCamelCase_=False , lowerCamelCase_ = 0 , lowerCamelCase_ = 1_4_0_0_0 , lowerCamelCase_ = None , lowerCamelCase_ = "fusion" , lowerCamelCase_ = "repeatpad" , **lowerCamelCase_ , ) -> Optional[Any]:
super().__init__(
feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = top_db
UpperCamelCase = truncation
UpperCamelCase = padding
UpperCamelCase = fft_window_size
UpperCamelCase = (fft_window_size >> 1) + 1
UpperCamelCase = hop_length
UpperCamelCase = max_length_s
UpperCamelCase = max_length_s * sampling_rate
UpperCamelCase = sampling_rate
UpperCamelCase = frequency_min
UpperCamelCase = frequency_max
UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm=lowerCamelCase_ , mel_scale='''htk''' , )
UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , )
def UpperCAmelCase__ ( self) -> Dict[str, Any]:
UpperCamelCase = copy.deepcopy(self.__dict__)
UpperCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> np.ndarray:
UpperCamelCase = spectrogram(
lowerCamelCase_ , window_function(self.fft_window_size , '''hann''') , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase_ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase = [0]
# randomly choose index for each part
UpperCamelCase = np.random.choice(ranges[0])
UpperCamelCase = np.random.choice(ranges[1])
UpperCamelCase = np.random.choice(ranges[2])
UpperCamelCase = mel[idx_front : idx_front + chunk_frames, :]
UpperCamelCase = mel[idx_middle : idx_middle + chunk_frames, :]
UpperCamelCase = mel[idx_back : idx_back + chunk_frames, :]
UpperCamelCase = torch.tensor(mel[None, None, :])
UpperCamelCase = torch.nn.functional.interpolate(
lowerCamelCase_ , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=lowerCamelCase_)
UpperCamelCase = mel_shrink[0][0].numpy()
UpperCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
UpperCamelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
UpperCamelCase = len(lowerCamelCase_) - max_length
UpperCamelCase = np.random.randint(0 , overflow + 1)
UpperCamelCase = waveform[idx : idx + max_length]
UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters)
                UpperCamelCase = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
UpperCamelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
UpperCamelCase = np.stack([mel, mel, mel, mel] , axis=0)
UpperCamelCase = False
else:
UpperCamelCase = self._random_mel_fusion(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented')
else:
UpperCamelCase = False
        # Only use repeat as a new possible value for padding; the audio is repeated before the usual max_length padding is applied.
if waveform.shape[0] < max_length:
if padding == "repeat":
UpperCamelCase = int(max_length / len(lowerCamelCase_))
UpperCamelCase = np.stack(np.tile(lowerCamelCase_ , n_repeat + 1))[:max_length]
if padding == "repeatpad":
UpperCamelCase = int(max_length / len(lowerCamelCase_))
UpperCamelCase = np.stack(np.tile(lowerCamelCase_ , lowerCamelCase_))
UpperCamelCase = np.pad(lowerCamelCase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0)
if truncation == "fusion":
UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters)
UpperCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> BatchFeature:
UpperCamelCase = truncation if truncation is not None else self.truncation
UpperCamelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''')
UpperCamelCase = isinstance(lowerCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}')
UpperCamelCase = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
            UpperCamelCase = [np.asarray(speech , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray):
UpperCamelCase = np.asarray(lowerCamelCase_ , dtype=np.floataa)
elif isinstance(lowerCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
UpperCamelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
UpperCamelCase = [np.asarray(lowerCamelCase_)]
# convert to mel spectrogram, truncate and pad if needed.
UpperCamelCase = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , lowerCamelCase_ , lowerCamelCase_)
for waveform in raw_speech
]
UpperCamelCase = []
UpperCamelCase = []
for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
if truncation == "fusion" and sum(lowerCamelCase_) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
UpperCamelCase = np.random.randint(0 , len(lowerCamelCase_))
UpperCamelCase = True
if isinstance(input_mel[0] , lowerCamelCase_):
            UpperCamelCase = [np.asarray(feature , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
UpperCamelCase = [[longer] for longer in is_longer]
UpperCamelCase = {'''input_features''': input_mel, '''is_longer''': is_longer}
UpperCamelCase = BatchFeature(lowerCamelCase_)
if return_tensors is not None:
UpperCamelCase = input_features.convert_to_tensors(lowerCamelCase_)
return input_features | 34 |
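# A minimal usage sketch, assuming this class corresponds to transformers'
# ClapFeatureExtractor (the waveform and names below are illustrative):
#
#   import numpy as np
#   from transformers import ClapFeatureExtractor
#
#   extractor = ClapFeatureExtractor()             # defaults: 48 kHz, 10 s max length
#   waveform = np.zeros(48_000, dtype=np.float32)  # one second of silence
#   batch = extractor(waveform, sampling_rate=48_000, return_tensors="pt")
#   # batch["input_features"]: a 4-way mel "fusion" stack; batch["is_longer"]: per-clip bool
#
# With truncation="fusion", clips longer than max_length contribute three random mel
# chunks plus a bilinearly shrunk global view; shorter clips are repeat-padded first.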
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 1 |
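# A short sketch of the decorator these tests exercise (assuming accelerate's
# public API; the training body is illustrative):
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # any "CUDA out of memory." RuntimeError triggers a retry at batch_size // 2
#
#   training_loop()  # called without arguments; the decorator injects batch_size
#
# As the first test asserts, a loop that only fits at 8 is retried at 128, 64, 32, 16, 8.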
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 42 # [batch_size x 3]
A_ = 42 # [batch_size x 3]
A_ = 42 # [batch_size x 3]
A_ = 42 # [batch_size x 3]
A_ = 42
A_ = 42
A_ = 42
A_ = 42
A_ = 42
def UpperCAmelCase__ ( self) -> List[Any]:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
def UpperCAmelCase__ ( self) -> Optional[int]:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa))
def UpperCAmelCase__ ( self) -> Any:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa))
def UpperCAmelCase__ ( self) -> torch.Tensor:
UpperCamelCase = torch.arange(self.height * self.width)
UpperCamelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase_ , self.width , rounding_mode='''trunc'''),
] , axis=1 , )
return coords
@property
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase , *UpperCamelCase = self.shape
UpperCamelCase = int(np.prod(lowerCamelCase_))
UpperCamelCase = self.get_image_coords()
UpperCamelCase = torch.broadcast_to(coords.unsqueeze(0) , [batch_size * inner_batch_size, *coords.shape])
UpperCamelCase = self.get_camera_rays(lowerCamelCase_)
UpperCamelCase = rays.view(lowerCamelCase_ , inner_batch_size * self.height * self.width , 2 , 3)
return rays
def UpperCAmelCase__ ( self , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase , *UpperCamelCase , UpperCamelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCamelCase = coords.view(lowerCamelCase_ , -1 , 2)
UpperCamelCase = self.resolution()
UpperCamelCase = self.fov()
UpperCamelCase = (flat.float() / (res - 1)) * 2 - 1
UpperCamelCase = fracs * torch.tan(fov / 2)
UpperCamelCase = fracs.view(lowerCamelCase_ , -1 , 2)
UpperCamelCase = (
self.z.view(lowerCamelCase_ , 1 , 3)
+ self.x.view(lowerCamelCase_ , 1 , 3) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase_ , 1 , 3) * fracs[:, :, 1:]
)
UpperCamelCase = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase_)
UpperCamelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase_ , 1 , 3) , [batch_size, directions.shape[1], 3]),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase_ , *lowerCamelCase_ , 2 , 3)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase_ , height=lowerCamelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
        UpperCamelCase = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCamelCase = -z * 4
        UpperCamelCase = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
UpperCamelCase = np.cross(_lowercase ,_lowercase )
origins.append(_lowercase )
xs.append(_lowercase )
ys.append(_lowercase )
zs.append(_lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_lowercase ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(_lowercase ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(_lowercase ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(_lowercase ,axis=0 ) ).float() ,width=_lowercase ,height=_lowercase ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(_lowercase )) ,) | 34 |
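# A small usage sketch for the helper above (assuming it mirrors shap-e's
# create_pan_cameras; the size value is illustrative):
#
#   cameras = create_pan_cameras(64)   # 20 poses orbiting the origin at radius 4
#   rays = cameras.camera_rays         # shape (1, 20 * 64 * 64, 2, 3)
#   origins, directions = rays[..., 0, :], rays[..., 1, :]
#
# Each pose's z-axis points back at the world origin and y comes from the cross
# product above, so every frame is an orthonormal right-handed camera basis.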
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 1 |
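# Note: in the upstream transformers script the two bare assignments around the
# second evaluate/predict round set and then clear trainer.args.eval_accumulation_steps
# (2, then None), so the sample-order check runs both with and without accumulation.
# A typical launch, mirroring the comment in the __main__ block:
#
#   torchrun --nproc_per_node=2 tests/test_trainer_distributed.py --output_dir /tmp/ddp_out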
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def __snake_case ( _lowercase ):
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 34 |
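# A minimal invocation sketch (script filename assumed; paths illustrative):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors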
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
        UpperCamelCase = k.replace(tf_name ,hf_name )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
        UpperCamelCase = tf.train.load_variable(_lowercase ,name )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
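# A minimal invocation sketch (script filename assumed):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_ckpt \
#       --save_dir ./bigbird-pegasus
#
# Dense/query/key/value kernels are transposed during conversion (the v.T branches
# above), and the per-layer biases listed in KEYS_TO_IGNORE are skipped entirely.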
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ):
"""simple docstring"""
if "resnet-50" in model_name:
UpperCamelCase = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
UpperCamelCase = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
        raise ValueError('''Model name should include either resnet-50 or resnet-101''' )
UpperCamelCase = DetrConfig(use_timm_backbone=_lowercase ,backbone_config=_lowercase )
# set label attributes
UpperCamelCase = '''panoptic''' in model_name
if is_panoptic:
UpperCamelCase = 250
else:
UpperCamelCase = 91
UpperCamelCase = '''huggingface/label-files'''
UpperCamelCase = '''coco-detection-id2label.json'''
UpperCamelCase = json.load(open(hf_hub_download(_lowercase ,_lowercase ,repo_type='''dataset''' ) ,'''r''' ) )
    UpperCamelCase = {int(k ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = state_dict.pop(_lowercase )
UpperCamelCase = val
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
UpperCamelCase = ''''''
if is_panoptic:
UpperCamelCase = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
UpperCamelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase = in_proj_weight_cross_attn[:256, :]
UpperCamelCase = in_proj_bias_cross_attn[:256]
UpperCamelCase = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase = in_proj_bias_cross_attn[256:512]
UpperCamelCase = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase = in_proj_bias_cross_attn[-256:]
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
return im
@torch.no_grad()
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = get_detr_config(_lowercase )
# load original model from torch hub
UpperCamelCase = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f'Converting model {model_name}...' )
UpperCamelCase = torch.hub.load('''facebookresearch/detr''' ,model_name_to_original_name[model_name] ,pretrained=_lowercase ).eval()
UpperCamelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_lowercase ):
if is_panoptic:
UpperCamelCase = '''detr.''' + src
rename_key(_lowercase ,_lowercase ,_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase ,is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
                UpperCamelCase = state_dict.pop(key )
UpperCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
                UpperCamelCase = state_dict.pop(key )
UpperCamelCase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
                UpperCamelCase = state_dict.pop(key )
UpperCamelCase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                UpperCamelCase = state_dict.pop(key )
UpperCamelCase = val
# finally, create HuggingFace model and load state dict
UpperCamelCase = DetrForSegmentation(_lowercase ) if is_panoptic else DetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# verify our conversion on an image
UpperCamelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
UpperCamelCase = DetrImageProcessor(format=_lowercase )
UpperCamelCase = processor(images=prepare_img() ,return_tensors='''pt''' )
UpperCamelCase = encoding['''pixel_values''']
UpperCamelCase = detr(_lowercase )
UpperCamelCase = model(_lowercase )
assert torch.allclose(outputs.logits ,original_outputs['''pred_logits'''] ,atol=1e-3 )
assert torch.allclose(outputs.pred_boxes ,original_outputs['''pred_boxes'''] ,atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs['''pred_masks'''] ,atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f'nielsr/{model_name}' )
processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 34 |
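# A minimal invocation sketch (script filename assumed):
#
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50
#
# The conversion is verified above by comparing pred_logits / pred_boxes against the
# original torch.hub model on the standard COCO image, with atol=1e-3.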
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
        for chb in my_alphas:
            UpperCamelCase = cha + chb
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
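# Worked example of the first-order entropy printed above, H = -sum_c p(c) * log2 p(c):
# a source emitting 'a' with p = 0.5 and 'b', 'c' with p = 0.25 each gives
# H = -(0.5 * log2 0.5 + 2 * 0.25 * log2 0.25) = 0.5 + 1.0 = 1.5 bits per symbol.
# calculate_prob prints H1 (single characters), H2 (character pairs), and H2 - H1,
# an estimate of the information added per symbol given one symbol of context.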
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''vqvae''']
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[Any]:
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , mel=lowerCamelCase_ , vqvae=lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
return 5_0 if isinstance(self.scheduler , lowerCamelCase_) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowerCamelCase_ = 1 , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = 0 , lowerCamelCase_ = 0 , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = 0 , lowerCamelCase_ = 0 , lowerCamelCase_ = None , lowerCamelCase_ = 0 , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCamelCase_)
UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCamelCase_ , device=self.device , )
UpperCamelCase = noise
UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = self.mel.audio_slice_to_image(lowerCamelCase_)
UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
UpperCamelCase = (input_image / 2_5_5) * 2 - 1
UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
UpperCamelCase = self.vqvae.encode(torch.unsqueeze(lowerCamelCase_ , 0)).latent_dist.sample(
generator=lowerCamelCase_)[0]
UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
UpperCamelCase = self.scheduler.add_noise(lowerCamelCase_ , lowerCamelCase_ , self.scheduler.timesteps[start_step - 1])
UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
UpperCamelCase = int(mask_start_secs * pixels_per_second)
UpperCamelCase = int(mask_end_secs * pixels_per_second)
UpperCamelCase = self.scheduler.add_noise(lowerCamelCase_ , lowerCamelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , lowerCamelCase_):
UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)['''sample''']
else:
UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_)['''sample''']
if isinstance(self.scheduler , lowerCamelCase_):
UpperCamelCase = self.scheduler.step(
model_output=lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , )['''prev_sample''']
else:
UpperCamelCase = self.scheduler.step(
model_output=lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , generator=lowerCamelCase_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
UpperCamelCase = self.vqvae.decode(lowerCamelCase_)['''sample''']
UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
UpperCamelCase = (images * 2_5_5).round().astype('''uint8''')
UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''').convert('''L''') for _ in images))
        UpperCamelCase = [self.mel.image_to_audio(_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCamelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(lowerCamelCase_))
@torch.no_grad()
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = 5_0) -> np.ndarray:
assert isinstance(self.scheduler , lowerCamelCase_)
self.scheduler.set_timesteps(lowerCamelCase_)
UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
UpperCamelCase = (sample / 2_5_5) * 2 - 1
UpperCamelCase = torch.Tensor(lowerCamelCase_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
UpperCamelCase = self.scheduler.alphas_cumprod[t]
UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_)['''sample''']
UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase = acos(torch.dot(torch.flatten(lowerCamelCase_) , torch.flatten(lowerCamelCase_)) / torch.norm(lowerCamelCase_) / torch.norm(lowerCamelCase_))
return sin((1 - alpha) * theta) * xa / sin(lowerCamelCase_) + sin(alpha * theta) * xa / sin(lowerCamelCase_) | 34 |
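# A minimal usage sketch, assuming this class corresponds to diffusers'
# AudioDiffusionPipeline (the repo id is illustrative):
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(generator=torch.Generator().manual_seed(42))
#   image = output.images[0]     # mel spectrogram as a PIL image
#   audio = output.audios[0, 0]  # waveform decoded from that spectrogram
#   rate = pipe.mel.get_sample_rate()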
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
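# Hedged usage sketch condensing the slow integration test above (running it
# downloads the checkpoint; variable names are illustrative):
#
# model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
# input_ids = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
# output = model(input_ids, attention_mask=np.ones_like(input_ids))[0]
# output.shape  # (1, 1_1, 7_6_8) for the base model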
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
SCREAMING_SNAKE_CASE_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
SCREAMING_SNAKE_CASE_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('\n'.join(upper_files) + '\n')
SCREAMING_SNAKE_CASE_ = [file for file in filepaths if ' ' in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('\n'.join(space_files) + '\n')
SCREAMING_SNAKE_CASE_ = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('\n'.join(hyphen_files) + '\n')
SCREAMING_SNAKE_CASE_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('\n'.join(nodir_files) + '\n')
SCREAMING_SNAKE_CASE_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files) | 34 |
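# Illustrative examples of what each check above flags (the paths are
# assumptions, not real repository files):
# 'maths/binary_search.py' -> passes every check
# 'Maths/binary_search.py' -> flagged: uppercase character
# 'maths/binary search.py' -> flagged: space character
# 'maths/binary-search.py' -> flagged: hyphen character
# 'binary_search.py'       -> flagged: not inside a directory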
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 | 1 |
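# Hedged usage sketch for the pipeline above (the checkpoint name and the
# scores are illustrative assumptions; running this downloads a CLIP-style
# model):
#
# from transformers import pipeline
# classifier = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
# classifier('cat.png', candidate_labels=['cat', 'dog'], hypothesis_template='This is a photo of {}.')
# # -> [{'score': 0.99, 'label': 'cat'}, {'score': 0.01, 'label': 'dog'}]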
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
A_ = Features({'''image''': Image()} )
A_ = Features({'''labels''': ClassLabel} )
A_ = "image"
A_ = "labels"
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.')
if not isinstance(features[self.label_column] , lowerCamelCase_):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.')
UpperCamelCase = copy.deepcopy(self)
UpperCamelCase = self.label_schema.copy()
UpperCamelCase = features[self.label_column]
UpperCamelCase = label_schema
return task_template
@property
def UpperCAmelCase__ ( self) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
} | 34 |
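# Hedged usage sketch (the class and method are obfuscated above; the names
# `ImageClassification` and `align_with_features` follow the upstream datasets
# API, and the feature values are illustrative):
#
# from datasets import ClassLabel, Features, Image
# features = Features({'image': Image(), 'labels': ClassLabel(names=['cat', 'dog'])})
# task = ImageClassification().align_with_features(features)
# task.label_schema['labels'].names  # ['cat', 'dog']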
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 1 |
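# Hedged usage sketch condensing the slow tests above (needs a GPU and network
# access; `torch.float16` is the real dtype behind the obfuscated `floataa`,
# and the input images are illustrative):
#
# pipe = StableDiffusionInpaintPipeline.from_pretrained(
#     'stabilityai/stable-diffusion-2-inpainting', torch_dtype=torch.float16
# )
# pipe.to('cuda')
# image = pipe(
#     prompt='Face of a yellow cat, high resolution, sitting on a park bench',
#     image=init_image, mask_image=mask_image,
# ).images[0]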
"""simple docstring"""
def __snake_case ( _lowercase = 100 ):
"""simple docstring"""
UpperCamelCase = set()
UpperCamelCase = 0
UpperCamelCase = n + 1 # maximum limit
for a in range(2 ,_lowercase ):
for b in range(2 ,_lowercase ):
UpperCamelCase = a**b # calculates the current power
collect_powers.add(_lowercase ) # adds the result to the set
return len(_lowercase )
if __name__ == "__main__":
print('Number of terms:', solution(int(input().strip())))
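# Sanity check (computed by hand): solution(5) == 15, because the 16 products
# a**b with 2 <= a, b <= 5 contain exactly one collision (2**4 == 4**2 == 16).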
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires soundfile>=0.12.0: \'pip install \"soundfile>=0.12.0\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta | 34 | 1 |
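# Hedged usage sketches for the helpers above (the names `offline` and
# `execute_subprocess_async` follow the upstream originals these obfuscated
# definitions derive from; the bodies are illustrative):
#
# with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#     ...  # every HTTP call made here hangs until its timeout instead of connecting
#
# result = execute_subprocess_async(['python', '-c', "print('ok')"])
# assert result.returncode == 0 and result.stdout == ['ok']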
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
'Number of comparisons for 100 elements selected from a standard normal '
'distribution is:'
)
print(z) | 34 |
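# Note on the counters above: `_in_place_partition` returns the final pivot
# position plus the number of comparisons it made, and `_in_place_quick_sort`
# accumulates those counts across its recursive calls. Because the pivot is
# chosen with `randint`, the printed comparison count varies between runs;
# only the sortedness of `M` is deterministic.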
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 1 |
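# Worked trace for the first assert above: from [4, 3, 5, 1, 2] the first
# increasing strand peeled off is [4, 5], leaving [3, 1, 2]; each strand is
# then merged into the running solution until the input is exhausted.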
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''layoutlmv3'''
def __init__( self , lowerCamelCase_=5_0_2_6_5 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=2 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=1_2_8 , lowerCamelCase_=1_2_8 , lowerCamelCase_=True , lowerCamelCase_=3_2 , lowerCamelCase_=1_2_8 , lowerCamelCase_=6_4 , lowerCamelCase_=2_5_6 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=2_2_4 , lowerCamelCase_=3 , lowerCamelCase_=1_6 , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Optional[Any]:
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = max_ad_position_embeddings
UpperCamelCase = coordinate_size
UpperCamelCase = shape_size
UpperCamelCase = has_relative_attention_bias
UpperCamelCase = rel_pos_bins
UpperCamelCase = max_rel_pos
UpperCamelCase = has_spatial_attention_bias
UpperCamelCase = rel_ad_pos_bins
UpperCamelCase = max_rel_ad_pos
UpperCamelCase = text_embed
UpperCamelCase = visual_embed
UpperCamelCase = input_size
UpperCamelCase = num_channels
UpperCamelCase = patch_size
UpperCamelCase = classifier_dropout
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = version.parse('''1.12''' )
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
])
@property
def UpperCAmelCase__ ( self) -> float:
return 1e-5
@property
def UpperCAmelCase__ ( self) -> int:
return 1_2
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = -1 , lowerCamelCase_ = -1 , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = 3 , lowerCamelCase_ = 4_0 , lowerCamelCase_ = 4_0 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_)
UpperCamelCase = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_)
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase = [[''' '''.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ))
return inputs | 34 |
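# Hedged usage sketch for the dummy-input generation above (the checkpoint is
# illustrative and `generate_dummy_inputs` is the upstream name of the
# obfuscated method):
#
# from transformers import LayoutLMv3Processor
# processor = LayoutLMv3Processor.from_pretrained('microsoft/layoutlmv3-base')
# dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8)
# sorted(dummy)  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']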
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 1 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
SCREAMING_SNAKE_CASE_ = 'pt'
elif is_tf_available():
SCREAMING_SNAKE_CASE_ = 'tf'
else:
SCREAMING_SNAKE_CASE_ = 'jax'
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = PerceiverTokenizer
A_ = False
def UpperCAmelCase__ ( self) -> Optional[int]:
super().setUp()
UpperCamelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def UpperCAmelCase__ ( self) -> Any:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''')
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=2_0 , lowerCamelCase_=5) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
UpperCamelCase = []
for i in range(len(lowerCamelCase_)):
try:
UpperCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_)
except UnicodeDecodeError:
pass
toks.append((i, tok))
UpperCamelCase = list(filter(lambda lowerCamelCase_: re.match(R'''^[ a-zA-Z]+$''' , t[1]) , lowerCamelCase_))
UpperCamelCase = list(filter(lambda lowerCamelCase_: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCamelCase_) , lowerCamelCase_))
if max_length is not None and len(lowerCamelCase_) > max_length:
UpperCamelCase = toks[:max_length]
if min_length is not None and len(lowerCamelCase_) < min_length and len(lowerCamelCase_) > 0:
while len(lowerCamelCase_) < min_length:
UpperCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
UpperCamelCase = [t[0] for t in toks]
# Ensure consistency
UpperCamelCase = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_)
if " " not in output_txt and len(lowerCamelCase_) > 1:
UpperCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_)
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_)
)
if with_prefix_space:
UpperCamelCase = ''' ''' + output_txt
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
return output_txt, output_ids
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.perceiver_tokenizer
UpperCamelCase = '''Unicode €.'''
UpperCamelCase = tokenizer(lowerCamelCase_)
UpperCamelCase = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase_)
# decoding
UpperCamelCase = tokenizer.decode(lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , '''[CLS]Unicode €.[SEP]''')
UpperCamelCase = tokenizer('''e è é ê ë''')
UpperCamelCase = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase_)
# decoding
UpperCamelCase = tokenizer.decode(lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , '''[CLS]e è é ê ë[SEP]''')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) , '''[CLS]e è é ê ë[SEP]''')
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.perceiver_tokenizer
UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCamelCase = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
if FRAMEWORK != "jax":
UpperCamelCase = list(batch.input_ids.numpy()[0])
else:
UpperCamelCase = list(batch.input_ids.tolist()[0])
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual((2, 3_8) , batch.input_ids.shape)
self.assertEqual((2, 3_8) , batch.attention_mask.shape)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = self.perceiver_tokenizer
UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCamelCase_)
self.assertIn('''attention_mask''' , lowerCamelCase_)
self.assertNotIn('''decoder_input_ids''' , lowerCamelCase_)
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.perceiver_tokenizer
UpperCamelCase = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCamelCase = tokenizer(
text_target=lowerCamelCase_ , max_length=3_2 , padding='''max_length''' , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertEqual(3_2 , targets['''input_ids'''].shape[1])
def UpperCAmelCase__ ( self) -> Optional[int]:
# safety check on max_len default value so we are sure the test works
UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
self.assertNotEqual(tokenizer.model_max_length , 4_2)
# Now let's start the test
UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = ''' He is very happy, UNwant\u00E9d,running'''
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
tokenizer.save_pretrained(lowerCamelCase_)
UpperCamelCase = tokenizer.__class__.from_pretrained(lowerCamelCase_)
UpperCamelCase = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
shutil.rmtree(lowerCamelCase_)
UpperCamelCase = self.get_tokenizers(model_max_length=4_2)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''])
UpperCamelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''')
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
tokenizer.save_pretrained(lowerCamelCase_)
UpperCamelCase = tokenizer.__class__.from_pretrained(lowerCamelCase_)
UpperCamelCase = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 4_2)
UpperCamelCase = tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=4_3)
self.assertEqual(tokenizer.model_max_length , 4_3)
shutil.rmtree(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_)
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''') , encoding='''utf-8''') as json_file:
UpperCamelCase = json.load(lowerCamelCase_)
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''') , encoding='''utf-8''') as json_file:
UpperCamelCase = json.load(lowerCamelCase_)
UpperCamelCase = [F'<extra_id_{i}>' for i in range(1_2_5)]
UpperCamelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCamelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_)
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCamelCase = tokenizer_class.from_pretrained(
lowerCamelCase_ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens)
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCamelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCamelCase_)]
UpperCamelCase = tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens)
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) , )
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8]) , '''�''')
def UpperCAmelCase__ ( self) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self) -> Union[str, Any]:
pass
def UpperCAmelCase__ ( self) -> List[Any]:
pass
def UpperCAmelCase__ ( self) -> Tuple:
pass
def UpperCAmelCase__ ( self) -> Optional[int]:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
UpperCamelCase = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
UpperCamelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
UpperCamelCase = tokenizer.convert_tokens_to_string(lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_) | 34 |
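# Note on the expected IDs in the tests above: the Perceiver tokenizer is
# byte-level, and every UTF-8 byte is shifted by the number of special tokens
# (6): in 'Unicode €.' the letter 'U' (byte 85) becomes ID 91, while IDs 4
# and 5 frame the sequence as [CLS] and [SEP].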
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 1 |
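# Illustrative result of the conversion checked above (an assumption
# consistent with the type asserts): the successful arg list converts to
# {'model_name_or_path': 'bert', 'do_train': False, 'epochs': 3,
#  'learning_rate': 5e-05, 'max_steps': 50.5}, while the failing list mixes
# bare flags ('--do_train', '--do_predict') with valued arguments and raises.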
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def __snake_case ( _lowercase ):
"""simple docstring"""
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_lowercase ):
return ext
raise Exception(
f'Unable to determine file format from file extension {path}. '
f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
UpperCamelCase = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
UpperCamelCase = PipelineDataFormat.from_str(
format=_lowercase ,output_path=args.output ,input_path=args.input ,column=args.column if args.column else nlp.default_input_names ,overwrite=args.overwrite ,)
return RunCommand(_lowercase ,_lowercase )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_) -> List[str]:
UpperCamelCase = nlp
UpperCamelCase = reader
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> Any:
UpperCamelCase = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''')
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''')
run_parser.add_argument('''--input''' , type=lowerCamelCase_ , help='''Path to the file to use for inference''')
run_parser.add_argument('''--output''' , type=lowerCamelCase_ , help='''Path to the file that will be used post to write results.''')
run_parser.add_argument('''--model''' , type=lowerCamelCase_ , help='''Name or path to the model to instantiate.''')
run_parser.add_argument('''--config''' , type=lowerCamelCase_ , help='''Name or path to the model\'s config to instantiate.''')
run_parser.add_argument(
'''--tokenizer''' , type=lowerCamelCase_ , help='''Name of the tokenizer to use. (default: same as the model name)''')
run_parser.add_argument(
'''--column''' , type=lowerCamelCase_ , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=lowerCamelCase_ , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=lowerCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''')
run_parser.set_defaults(func=lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase , UpperCamelCase = self._nlp, []
for entry in self._reader:
UpperCamelCase = nlp(**lowerCamelCase_) if self._reader.is_multi_columns else nlp(lowerCamelCase_)
if isinstance(lowerCamelCase_ , lowerCamelCase_):
outputs.append(lowerCamelCase_)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
UpperCamelCase = self._reader.save_binary(lowerCamelCase_)
logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}')
else:
self._reader.save(lowerCamelCase_) | 34 |
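# Hedged CLI sketch built from the parser flags above (the entry-point name
# and the file names are illustrative assumptions):
#
#   transformers-cli run --task sentiment-analysis \
#       --input data.csv --column text \
#       --output predictions.json --format infer --device -1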
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system, with a CER of 0 being a perfect score.'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
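# Worked example of the formula documented above: for reference "kitten" vs
# hypothesis "sitting", the cheapest alignment uses S=2 substitutions (k->s,
# e->i), D=0 deletions and I=1 insertion (the final g), over N=6 reference
# characters, so CER = (2 + 0 + 1) / 6 = 0.5.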
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 | 1 |
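# A self-contained sketch of what the per-pair loop above computes, using a
# plain character-level Levenshtein distance instead of jiwer (illustrative
# only; jiwer additionally applies the whitespace transforms above, which
# leave these particular inputs unchanged):
def _char_error_rate(references, predictions):
    def _edit_distance(a, b):
        # prev[j] holds the distance between the processed prefix of a and b[:j].
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, 1):
            cur = [i]
            for j, cb in enumerate(b, 1):
                cur.append(min(prev[j] + 1,                # deletion
                               cur[j - 1] + 1,             # insertion
                               prev[j - 1] + (ca != cb)))  # substitution / match
            prev = cur
        return prev[-1]

    errors = sum(_edit_distance(r, p) for r, p in zip(references, predictions))
    return errors / sum(len(r) for r in references)

print(_char_error_rate(
    ["this is the reference", "there is another one"],
    ["this is the prediction", "there is an other sample"],
))  # -> 0.34146341463414637, matching the docstring example above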
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 42
A_ = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline | 34 |
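# BaseOutput subclasses like the one above behave both as dataclasses and as
# read-only, dict-like containers. A minimal sketch with a toy subclass (the
# field name `images` is an assumption for illustration, not this pipeline's API):
from dataclasses import dataclass as _dataclass
from diffusers.utils import BaseOutput as _BaseOutput

@_dataclass
class _ToyOutput(_BaseOutput):
    images: list = None

_out = _ToyOutput(images=[1, 2])
assert _out.images == _out["images"] == [1, 2]  # attribute and key access agree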
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
# The mask token behaves like a normal word, i.e. it includes the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
# To avoid mixing byte-level and unicode for byte-level BPE,
# we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
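        # XLNet puts its special tokens at the END, unlike BERT:
        # `A <sep> <cls>` for one sequence, `A <sep> B <sep> <cls>` for a pair.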
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 1 |
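# A standalone sketch of the preprocessing the tokenizer above performs before
# SentencePiece (whitespace collapsing, LaTeX-style quote normalization,
# optional accent stripping via NFKD); the defaults mirror the constructor's:
import unicodedata

def _preprocess(text, remove_space=True, keep_accents=False, do_lower_case=False):
    out = " ".join(text.strip().split()) if remove_space else text
    out = out.replace("``", '"').replace("''", '"')
    if not keep_accents:
        out = unicodedata.normalize("NFKD", out)
        out = "".join(c for c in out if not unicodedata.combining(c))
    return out.lower() if do_lower_case else out

print(_preprocess("Héllo   ``world''"))  # -> Hello "world"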
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('fixtures/test_sentencepiece.model')
SCREAMING_SNAKE_CASE_ = {'target_lang': 'fi', 'source_lang': 'en'}
SCREAMING_SNAKE_CASE_ = '>>zh<<'
SCREAMING_SNAKE_CASE_ = 'Helsinki-NLP/'
if is_torch_available():
SCREAMING_SNAKE_CASE_ = 'pt'
elif is_tf_available():
SCREAMING_SNAKE_CASE_ = 'tf'
else:
SCREAMING_SNAKE_CASE_ = 'jax'
@require_sentencepiece
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = MarianTokenizer
A_ = False
A_ = True
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().setUp()
UpperCamelCase = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = Path(self.tmpdirname)
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''vocab'''])
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''source_spm'''])
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''target_spm'''])
UpperCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = '''</s>'''
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_) , lowerCamelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_) , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<pad>''')
self.assertEqual(len(lowerCamelCase_) , 9)
def UpperCAmelCase__ ( self) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de')
UpperCamelCase = en_de_tokenizer(['''I am a small frog'''] , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(lowerCamelCase_ , batch.input_ids[0])
UpperCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCamelCase_)
UpperCamelCase = [x.name for x in Path(lowerCamelCase_).glob('''*''')]
self.assertIn('''source.spm''' , lowerCamelCase_)
MarianTokenizer.from_pretrained(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tok(
['''I am a small frog''' * 1_0_0_0, '''I am a small frog'''] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(batch.input_ids.shape , (2, 5_1_2))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0))
@slow
def UpperCAmelCase__ ( self) -> List[str]:
# fmt: off
UpperCamelCase = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''')
UpperCamelCase = '''Tämä on testi'''
UpperCamelCase = '''This is a test'''
UpperCamelCase = [7_6, 7, 2_0_4_7, 2]
UpperCamelCase = [6_9, 1_2, 1_1, 9_4_0, 2]
UpperCamelCase = tokenizer(lowerCamelCase_).input_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokenizer(text_target=lowerCamelCase_).input_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_) | 34 |
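# A minimal usage sketch for the tokenizer exercised above (downloads model
# files from the Hub; the checkpoint name and input come from the test, and
# `return_tensors="pt"` assumes torch is installed):
from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tok(["I am a small frog"], return_tensors="pt")
print(batch.input_ids[0].tolist())  # per the test, the ids end with 0, the </s> id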
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
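        # Greedy longest-match-first: take the longest substring starting at
        # `start` that is in the vocab; fall back to the unknown token when
        # even a single character misses.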
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
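# A self-contained sketch of the same greedy longest-match-first lookup the
# class above implements, with a toy vocabulary (illustrative only; the names
# here are made up):
def _greedy_tokenize(word, vocab, unk="<unk>"):
    start, pieces = 0, []
    while start < len(word):
        end = len(word)
        match = None
        while start < end:  # shrink the window until the vocab hits
            if word[start:end] in vocab:
                match = word[start:end]
                break
            end -= 1
        if match is None:  # no piece starts here: emit <unk> and move on
            pieces.append(unk)
            start += 1
        else:
            pieces.append(match)
            start = end
    return pieces

print(_greedy_tokenize("unhappiness", {"un", "happi", "ness"}))  # -> ['un', 'happi', 'ness']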
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 1 |
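# A minimal usage sketch (assumes the `jieba` extra and Hub access; the
# checkpoint name is taken from the pretrained vocab map above):
from transformers import CpmAntTokenizer

tok = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
print(tok.tokenize("今天天气真好"))  # jieba pre-segmentation, then greedy vocab lookup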