"""PyTorch learning-rate schedulers with warmup, dispatched through `get_scheduler`."""
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate, equal to the initial lr set in the optimizer."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup from 0 to the initial lr."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant schedule. E.g. step_rules="1:10,0.1:20,0.01:30,0.005" keeps a
    multiplier of 1 for the first 10 steps, 0.1 until step 20, 0.01 until step 30, then 0.005."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 over the remaining training steps."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay down to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Unified entry point that builds the scheduler named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
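

# A minimal usage sketch (the toy model below is illustrative; this module is normally
# imported as part of a package, so the example is left as a comment):
#
#   import torch
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
#   for _ in range(1000):
#       optimizer.step()
#       scheduler.step()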
"""Prefix-sum array supporting O(1) range-sum queries after O(n) preprocessing."""


class PrefixSum:
    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] (inclusive)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to `target_sum`."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
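    # A minimal usage sketch (assumes the PrefixSum class above):
    ps = PrefixSum([1, 2, 3, 4])
    print(ps.get_sum(1, 3))    # 9: 2 + 3 + 4
    print(ps.contains_sum(7))  # True: 3 + 4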
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3,317,044,064,679,887,385,961,981;
    probabilistic above that bound when allow_probable=True."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    plist = primes  # fall back to every witness when n exceeds the last bound
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and the remaining odd component d,
    # i.e. solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    """Sanity checks on either side of each documented witness bound."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
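    # A minimal usage sketch (assumes miller_rabin as defined above):
    print(miller_rabin(97))   # True: 97 is prime
    print(miller_rabin(561))  # False: 561 is a Carmichael number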
"""Prime factorization by trial division."""
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n, e.g. prime_factors(12) == [2, 2, 3]."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
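    # A minimal usage sketch (assumes prime_factors as defined above):
    print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]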
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    """Deprecated alias kept for backward compatibility; use `Trainer` directly."""

    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
"""Diffie-Hellman key exchange over the RFC 3526 MODP groups."""
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange using the RFC 3526 MODP groups defined above."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # key must lie in [2, p - 2] and satisfy the Legendre-symbol check
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # key must lie in [2, p - 2] and satisfy the Legendre-symbol check
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
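    # A minimal end-to-end exchange sketch (assumes the DiffieHellman class above):
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b  # both parties derive the same SHA-256 digest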
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_A = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def a__ ( lowerCAmelCase ) -> Any:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
return max(metric_fn(__A , __A ) for gt in ground_truths )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = [line.strip() for line in open(__A , """r""" ).readlines()]
UpperCAmelCase__ : Tuple = []
if args.gold_data_mode == "qa":
UpperCAmelCase__ : Optional[int] = pd.read_csv(__A , sep="""\t""" , header=__A )
for answer_list in data[1]:
UpperCAmelCase__ : Union[str, Any] = ast.literal_eval(__A )
answers.append(__A )
else:
UpperCAmelCase__ : Any = [line.strip() for line in open(__A , """r""" ).readlines()]
UpperCAmelCase__ : str = [[reference] for reference in references]
UpperCAmelCase__ : List[str] = 0
for prediction, ground_truths in zip(__A , __A ):
total += 1
em += metric_max_over_ground_truths(__A , __A , __A )
fa += metric_max_over_ground_truths(__A , __A , __A )
UpperCAmelCase__ : List[str] = 1_00.0 * em / total
UpperCAmelCase__ : Tuple = 1_00.0 * fa / total
logger.info(F"""F1: {fa:.2f}""" )
logger.info(F"""EM: {em:.2f}""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
UpperCAmelCase__ : List[Any] = args.k
UpperCAmelCase__ : Union[str, Any] = [line.strip() for line in open(__A , """r""" ).readlines()]
UpperCAmelCase__ : int = [line.strip() for line in open(__A , """r""" ).readlines()]
UpperCAmelCase__ : Any = 0
for hypo, reference in zip(__A , __A ):
UpperCAmelCase__ : Tuple = set(hypo.split("""\t""" )[:k] )
UpperCAmelCase__ : List[Any] = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCAmelCase__ : Union[str, Any] = 1_00.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
def strip_title(lowerCAmelCase ):
if title.startswith("""\"""" ):
UpperCAmelCase__ : Dict = title[1:]
if title.endswith("""\"""" ):
UpperCAmelCase__ : List[Any] = title[:-1]
return title
UpperCAmelCase__ : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__A , return_tensors="""pt""" , padding=__A , truncation=__A , )["""input_ids"""].to(args.device )
UpperCAmelCase__ : Union[str, Any] = rag_model.rag.question_encoder(__A )
UpperCAmelCase__ : Union[str, Any] = question_enc_outputs[0]
UpperCAmelCase__ : List[Any] = rag_model.retriever(
__A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
UpperCAmelCase__ : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCAmelCase__ : List[Any] = []
for docs in all_docs:
UpperCAmelCase__ : Any = [strip_title(__A ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(__A ) )
return provenance_strings
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
with torch.no_grad():
UpperCAmelCase__ : str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__A , return_tensors="""pt""" , padding=__A , truncation=__A )
UpperCAmelCase__ : List[str] = inputs_dict.input_ids.to(args.device )
UpperCAmelCase__ : str = inputs_dict.attention_mask.to(args.device )
UpperCAmelCase__ : List[str] = rag_model.generate( # rag_model overwrites generate
__A , attention_mask=__A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCAmelCase__ : Union[str, Any] = rag_model.retriever.generator_tokenizer.batch_decode(__A , skip_special_tokens=__A )
if args.print_predictions:
for q, a in zip(__A , __A ):
logger.info("""Q: {} - A: {}""".format(__A , __A ) )
return answers
def a__ ( ) -> str:
UpperCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__A , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=__A , choices=["""exact""", """compressed""", """legacy"""] , type=__A , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=__A , type=__A , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=__A , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=__A , type=__A , required=__A , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__A , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=__A , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=__A , type=__A , required=__A , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=__A , type=__A , required=__A , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=__A , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=__A , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=__A , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=__A , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=__A , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=__A , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
UpperCAmelCase__ : Tuple = parser.parse_args()
UpperCAmelCase__ : Optional[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def a__ ( lowerCAmelCase ) -> Optional[Any]:
UpperCAmelCase__ : Optional[int] = {}
if args.model_type is None:
UpperCAmelCase__ : Union[str, Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
UpperCAmelCase__ : Optional[int] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
UpperCAmelCase__ : Optional[Any] = args.n_docs
if args.index_name is not None:
UpperCAmelCase__ : Union[str, Any] = args.index_name
if args.index_path is not None:
UpperCAmelCase__ : List[str] = args.index_path
else:
UpperCAmelCase__ : str = BartForConditionalGeneration
UpperCAmelCase__ : List[Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , __A )
UpperCAmelCase__ : List[Any] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
UpperCAmelCase__ : Any = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(__A , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(__A ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
UpperCAmelCase__ : Any = RagRetriever.from_pretrained(__A , **__A )
UpperCAmelCase__ : Any = model_class.from_pretrained(__A , retriever=__A , **__A )
model.retriever.init_retrieval()
else:
UpperCAmelCase__ : Dict = model_class.from_pretrained(__A , **__A )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
UpperCAmelCase__ : Any = []
for line in tqdm(__A ):
questions.append(line.strip() )
if len(__A ) == args.eval_batch_size:
UpperCAmelCase__ : Optional[Any] = evaluate_batch_fn(__A , __A , __A )
preds_file.write("""\n""".join(__A ) + """\n""" )
preds_file.flush()
UpperCAmelCase__ : Any = []
if len(__A ) > 0:
UpperCAmelCase__ : str = evaluate_batch_fn(__A , __A , __A )
preds_file.write("""\n""".join(__A ) )
preds_file.flush()
score_fn(__A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_A = get_args()
main(args)
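
# Example invocation (script name and paths are illustrative; flags as defined by the parser above):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set test.source \
#       --gold_data_path gold_data.tsv \
#       --predictions_path predictions.txt \
#       --eval_mode e2e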
"""Prune GPT-2 attention heads by importance score (bertology-style head analysis)."""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results, replacing any stale config/weights in the target directory
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores
    according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) iteratively, from least to most important,
    until the score drops below a threshold of the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked head weights and compare score and timing
    before and after pruning."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
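
# Example invocation (script name and paths are illustrative; flags as defined by the parser above):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir tokens.txt \
#       --output_dir ./pruned_gpt2 --try_masking --masking_threshold 0.9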
"""Tokenization classes for PhoBERT, based on Byte-Pair Encoding."""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, based on Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # special tokens occupy the first vocabulary slots
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: <s> X </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # strip the trailing "</w>" marker
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
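
# A minimal usage sketch (file paths are illustrative; in practice the tokenizer is
# usually loaded with AutoTokenizer.from_pretrained("vinai/phobert-base")):
#   tokenizer = PhobertTokenizer(vocab_file="vocab.txt", merges_file="bpe.codes")
#   print(tokenizer.tokenize("xin chào"))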
"""Multiplicative and additive persistence of a non-negative integer."""


def multiplicative_persistence(num: int) -> int:
    """Number of times `num` must be replaced by the product of its digits
    before reaching a single digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Number of times `num` must be replaced by the sum of its digits
    before reaching a single digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
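    # A minimal usage sketch (assumes the two functions above):
    print(multiplicative_persistence(39))  # 3: 39 -> 27 -> 14 -> 4
    print(additive_persistence(199))       # 3: 199 -> 19 -> 10 -> 1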
"""Tests for the AST (Audio Spectrogram Transformer) feature extractor."""
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random 2D float tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
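
# To run these tests directly (assumes a transformers source checkout so the relative
# test mixin import resolves; the -k filter below is illustrative):
#   python -m pytest -k ASTFeatureExtractionTest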
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __A ( unittest.TestCase ):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_normalize=True,
                 image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
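The three call tests only vary the input container (PIL, NumPy, PyTorch); the processor maps all of them to the same 4-D pixel_values tensor. A hedged usage sketch outside the test harness (the random 480x640 image and the 18x18 target size are assumptions taken from the tester defaults):

# Illustrative sketch, not part of the test file above.
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

image_processor = DPTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])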
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
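test_full_tokenizer relies on the toy vocabulary written in setUp; the id lookup it performs can be reproduced by hand. A sketch with the relevant subset of that vocabulary (repeated here only for illustration):

# Illustrative sketch: the token-to-id lookup behind test_full_tokenizer.
toy_vocab = {"l": 0, "o": 1, "w": 2, "e": 3, "n": 9, "\u0120": 10, "er": 15, "[UNK]": 19}
tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er", "[UNK]"]
ids = [toy_vocab.get(token, toy_vocab["[UNK]"]) for token in tokens]
print(ids)  # [0, 1, 2, 15, 10, 9, 3, 2, 15, 19], the expected ids in the test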
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
def __init__(self , A , A=16 , A=13 , A=7 , A=14 , A=10 , A=19 , A=5 , A=4 , A=True , A=16 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=[1, 2, 3, 4, 5] , A=25 , A=5 , ) -> List[str]:
"""simple docstring"""
_a = d_model
_a = parent
_a = batch_size
_a = prediction_length
_a = context_length
_a = cardinality
_a = num_time_features
_a = lags_sequence
_a = embedding_dimension
_a = is_training
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = context_length
_a = prediction_length + label_length
_a = label_length
_a = moving_average
_a = autocorrelation_factor
def a__ (self ) -> Any:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
_a = config.context_length + max(config.lags_sequence )
_a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, _past_length] )
_a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, config.prediction_length] )
_a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.get_config()
_a = self.prepare_autoformer_inputs_dict(A )
return config, inputs_dict
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ (self , A , A ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModel(config=A ).to(A ).eval()
_a = model(**A )
_a = outputs.encoder_last_hidden_state
_a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_encoder()
encoder.save_pretrained(A )
_a = AutoformerEncoder.from_pretrained(A ).to(A )
_a , _a , _a , _a , _a = model.create_network_inputs(**A )
_a , _a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_a = encoder(inputs_embeds=A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_decoder()
decoder.save_pretrained(A )
_a = AutoformerDecoder.from_pretrained(A ).to(A )
_a = decoder(
trend=A , inputs_embeds=A , encoder_hidden_states=A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = False
__lowerCamelCase : int = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : List[Any] = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
def a__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a = model_class(A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
_a , _a = model_class.from_pretrained(A , output_loading_info=A )
self.assertEqual(info['''missing_keys'''] , [] )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = inspect.signature(getattr(A , '''forward''' ) )
# The main input is the name of the argument after `self`
_a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(A )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(A )] , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , '''seq_length''' , A )
_a = getattr(self.model_tester , '''decoder_seq_length''' , A )
_a = getattr(self.model_tester , '''encoder_seq_length''' , A )
_a = getattr(self.model_tester , '''d_model''' , A )
_a = getattr(self.model_tester , '''num_attention_heads''' , A )
_a = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_a = len(A )
_a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(A , A )
# decoder attentions
_a = outputs.decoder_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_a = outputs.cross_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 2 , len(A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch()
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
def a__ (self ) -> Any:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
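A hedged end-to-end sketch of the generation path these integration tests exercise, reusing the prepare_batch helper defined above (CPU execution and the public checkpoint are assumed):

# Illustrative sketch reusing prepare_batch from this file.
import torch
from transformers import AutoformerForPrediction

model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
batch = prepare_batch("val-batch.pt")
with torch.no_grad():
    outputs = model.generate(
        static_categorical_features=batch["static_categorical_features"],
        past_time_features=batch["past_time_features"],
        past_values=batch["past_values"],
        future_time_features=batch["future_time_features"],
        past_observed_mask=batch["past_observed_mask"],
    )
point_forecast = outputs.sequences.mean(dim=1)  # average the parallel sample paths
print(point_forecast.shape)  # (64, prediction_length)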
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
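get_dummy_inputs above relies on a device-aware seeded generator so the fast tests stay deterministic. The pattern in isolation (make_generator is a local helper name of ours, not a diffusers API):

# Illustrative sketch of the seeded-generator pattern.
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS does not support device-bound generators, so fall back to the global seed.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen_a = make_generator("cpu", seed=0)
gen_b = make_generator("cpu", seed=0)
assert torch.equal(torch.randn(2, generator=gen_a), torch.randn(2, generator=gen_b))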
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = OpenLlamaModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Any:
"""simple docstring"""
_a = True
_a = OpenLlamaModel(A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , )
_a = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = True
_a = True
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
def a__ (self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''single_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''multi_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__ (self , A ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 10] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
_a = original_model(A ).last_hidden_state
_a = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = {'''type''': scaling_type, '''factor''': 10.0}
_a = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
_a = scaled_model(A ).last_hidden_state
_a = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
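The parameterized scaling test feeds rope_scaling configs into the model; a sketch of building such a config directly (the tiny sizes are arbitrary choices for a quick smoke run, not values from the test suite):

# Illustrative sketch: a tiny Open-Llama model with dynamic RoPE scaling enabled.
from transformers import OpenLlamaConfig, OpenLlamaModel

config = OpenLlamaConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    max_position_embeddings=512,
    rope_scaling={"type": "dynamic", "factor": 10.0},
)
model = OpenLlamaModel(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the toy model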
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
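A quick sketch of using the PokerHand class the suite targets; it only runs inside the same package as this test module, and compare_with returns one of "Win", "Loss", or "Tie":

# Illustrative sketch; relative import as in the test module above.
from .sol1 import PokerHand

player = PokerHand("2H 3H 4H 5H 6H")    # six-high straight flush
opponent = PokerHand("KS AS TS QS JS")  # royal flush
print(player.compare_with(opponent))    # "Loss"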
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=4 , ) -> List[str]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_attention_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_choices
def a__ (self ) -> str:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_attention_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_model(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
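The integration test pins an exact hidden-state slice; a hedged sketch of the same forward pass without the pinned values (downloads albert-base-v2 on first use):

# Illustrative sketch mirroring the slow integration test above.
import numpy as np
from transformers import FlaxAlbertModel

model = FlaxAlbertModel.from_pretrained("albert-base-v2")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.ones_like(input_ids)
last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # (1, 11, 768)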
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_lowerCAmelCase , _lowerCAmelCase = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase , lowerCamelCase ) | 156 |
'''simple docstring'''
def validate_initial_digits(credit_card_number):
    """simple docstring"""
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6'''))
def luhn_validation(credit_card_number):
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len , -1 , -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1 , -1 , -2):
        total += int(cc_number[i])
    return total % 10 == 0
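# Worked example (illustrative, not part of the original algorithm file): for
# "4111111111111111", doubling every second digit from the right turns the leading 4
# into 8 and seven of the 1s into 2s, so the checksum is 8 + 7*2 + 8*1 = 30, and
# 30 % 10 == 0, meaning the number passes the Luhn check.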
def validate_credit_card_number(credit_card_number):
    """simple docstring"""
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F'''{error_message} of its length.''')
        return False
    if not validate_initial_digits(credit_card_number):
        print(F'''{error_message} of its first two digits.''')
        return False
    if not luhn_validation(credit_card_number):
        print(F'''{error_message} it fails the Luhn check.''')
        return False
    print(F'''{credit_card_number} is a valid credit card number.''')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : Tuple = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ["YolosFeatureExtractor"]
_lowerCAmelCase : List[str] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
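# Hedged usage sketch (hypothetical consumer code, not part of this __init__): with the
# _LazyModule wiring above, submodules load only when first accessed, e.g.
# `from transformers import YolosForObjectDetection` triggers the torch-gated import
# at that point rather than at `import transformers` time.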
| 261 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
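# Note (assumed _LazyModule behavior): the TYPE_CHECKING branch above hands static type
# checkers the real symbols, while at runtime sys.modules[__name__] is swapped for the
# lazy proxy built from _import_structure, so the TF* classes only import when
# TensorFlow is actually installed and the names are first touched.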
| 11 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class DeiTFeatureExtractor( DeiTImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DeiTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
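# Hypothetical usage sketch (not part of this module): instantiating the deprecated
# class emits the FutureWarning above and then behaves exactly like DeiTImageProcessor.
# import warnings
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     DeiTFeatureExtractor()  # one FutureWarning recorded, instance works as before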
| 582 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase (__A = "laptop"):
"""simple docstring"""
    url = F'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url , headers=header).text)
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
])
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
            try:
                product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                            - float(product_price.strip('''₹''').replace(''',''' , ''''''))
                        )
                        / float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                    )
                    * 100)
            except ValueError:
                discount = float('''nan''')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[
        data_frame['''Current Price of the product'''] > data_frame['''MRP of the product'''],
        '''MRP of the product''',
    ] = ''' '''
    data_frame.loc[
        data_frame['''Current Price of the product'''] > data_frame['''MRP of the product'''],
        '''Current Price of the product''',
    ] = ''' '''
    data_frame.index += 1
return data_frame
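# Worked example for the discount formula above (illustrative numbers, not scraped
# data): with MRP "₹1,000" and price "₹750", (1000 - 750) / 1000 * 100 == 25.0,
# i.e. a 25 percent discount.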
if __name__ == "__main__":
lowercase_ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 11 | 0 |
def lowerCAmelCase__ ( n: int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 514 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image , w , h):
    """simple docstring"""
    if isinstance(image , torch.Tensor):
        return image
    elif isinstance(image , PIL.Image.Image):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
        image = np.concatenate(image , axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0 , 3 , 1 , 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0] , torch.Tensor):
        image = torch.cat(image , dim=0)
    return image
def slerp(t , va , vb , DOT_THRESHOLD=0.9995):
    """simple docstring"""
    inputs_are_torch = False
    if not isinstance(va , np.ndarray):
        inputs_are_torch = True
        input_device = va.device
        va = va.cpu().numpy()
        vb = vb.cpu().numpy()
    dot = np.sum(va * vb / (np.linalg.norm(va) * np.linalg.norm(vb)))
    if np.abs(dot) > DOT_THRESHOLD:
        result = (1 - t) * va + t * vb
    else:
        theta_a = np.arccos(dot)
        sin_theta_a = np.sin(theta_a)
        theta_t = theta_a * t
        sin_theta_t = np.sin(theta_t)
        sa = np.sin(theta_a - theta_t) / sin_theta_a
        sb = sin_theta_t / sin_theta_a
        result = sa * va + sb * vb
    if inputs_are_torch:
        result = torch.from_numpy(result).to(input_device)
    return result
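# Quick numeric sketch for the slerp above (values illustrative): halfway between the
# orthogonal unit vectors [1, 0] and [0, 1], the trig branch gives
# sin(pi/4)/sin(pi/2) * each input, i.e. roughly [0.7071, 0.7071] -- still unit norm,
# which is why spherical interpolation is preferred over a plain lerp for latents.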
def spherical_dist_loss(x , y):
    """simple docstring"""
    x = F.normalize(x , dim=-1)
    y = F.normalize(y , dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model , value):
    """simple docstring"""
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion( DiffusionPipeline ):
    '''simple docstring'''
    def __init__(self , vae , text_encoder , clip_model , tokenizer , unet , scheduler , feature_extractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size , int )
            else feature_extractor.size['''shortest_edge''']
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , False )
        set_requires_grad(self.clip_model , False )
def a__ (self , A = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
    def disable_attention_slicing(self ) -> None:
        """simple docstring"""
        self.enable_attention_slicing(None )
    def freeze_vae(self ) -> None:
        """simple docstring"""
        set_requires_grad(self.vae , False )
    def unfreeze_vae(self ) -> None:
        """simple docstring"""
        set_requires_grad(self.vae , True )
    def freeze_unet(self ) -> None:
        """simple docstring"""
        set_requires_grad(self.unet , False )
    def unfreeze_unet(self ) -> None:
        """simple docstring"""
        set_requires_grad(self.unet , True )
    def get_timesteps(self , num_inference_steps , strength , device ) -> Optional[Any]:
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self , image , timestep , batch_size , dtype , device , generator=None ) -> List[str]:
        """simple docstring"""
        if not isinstance(image , torch.Tensor ):
            raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(image )}''' )
        image = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ):
            init_latents = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
            ]
            init_latents = torch.cat(init_latents , dim=0 )
        else:
            init_latents = self.vae.encode(image ).latent_dist.sample(generator )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size , dim=0 )
        noise = randn_tensor(init_latents.shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def get_image_description(self , image ) -> Tuple:
        """simple docstring"""
        transformed_image = self.coca_transform(image ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
    def get_clip_image_embeddings(self , image , batch_size ) -> List[Any]:
        """simple docstring"""
        clip_image_input = self.feature_extractor.preprocess(image )
        clip_image_features = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size , dim=0 )
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self , latents , timestep , index , text_embeddings , noise_pred_original , original_image_embeddings_clip , clip_guidance_scale , ) -> Union[str, Any]:
        """simple docstring"""
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents , timestep )
        # predict the noise residual
        noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t )
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , LMSDiscreteScheduler ):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = transforms.Resize(self.feature_extractor_size )(image )
        image = self.normalize(image ).to(latents.dtype )
        image_embeddings_clip = self.clip_model.get_image_features(image )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        loss = spherical_dist_loss(image_embeddings_clip , original_image_embeddings_clip ).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss , latents )[0]
        if isinstance(self.scheduler , LMSDiscreteScheduler ):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(self , style_image , content_image , style_prompt = None , content_prompt = None , height = 512 , width = 512 , noise_strength = 0.6 , num_inference_steps = 50 , guidance_scale = 7.5 , batch_size = 1 , eta = 0.0 , clip_guidance_scale = 100 , generator = None , output_type = "pil" , return_dict = True , slerp_latent_style_strength = 0.8 , slerp_prompt_style_strength = 0.1 , slerp_clip_image_style_strength = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 11 | 0 |
def euclidean_gcd( a ,b )-> int:
    '''simple docstring'''
    while b:
        a ,b = b, a % b
    return a
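# Example trace (illustrative): euclidean_gcd(48, 18) steps through
# (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6.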
def euclidean_gcd_recursive( a ,b )-> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b ,a % b )
def main( )-> None:
'''simple docstring'''
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 ,5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 ,3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 ,3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 ,6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 ,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 ,5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 ,3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 ,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 ,6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 ,3 )}''' )
if __name__ == "__main__":
main()
| 393 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self ) -> None:
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ) -> Tuple:
        """simple docstring"""
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
    def test_full_tokenizer(self ) -> None:
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
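# Illustration of the "@@" convention used above (not an extra test): "re@@" marks a
# subword that continues into the next token, so "readapt" round-trips through BPE as
# the pair "re@@ adapt" while standalone "adapt" stays a single token.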
| 11 | 0 |
"""simple docstring"""
from math import log2
def a__ ( a ) -> int:
    if a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(a , float ):
        raise TypeError("""Input value must be a \'int\' type""" )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """simple docstring"""
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer , key):
            new_key = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , new_key)
        layer_to_block_of_layer = r'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer , key):
            groups = re.match(layer_to_block_of_layer , new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , new_key)
                new_key = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , new_key)
                new_key = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key)
        print(F'''{key} -> {new_key}''')
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_expert_key = key.replace('''expert/''' , F'''experts/expert_{idx}/''')
                s_dict[new_expert_key] = expert_weights[idx]
                print(F'''{key} -> {new_expert_key}''')
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file , num_experts):
    """simple docstring"""
    import regex as re
    with open(gin_file , '''r''') as f:
        raw_gin = f.read()
    regex_match = re.findall(r'''(.*) = ([0-9.]*)''' , raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '''.''' in value else int(value)
    activation = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path , config_name , gin_file=None , pytorch_dump_path="./" , num_experts=8):
    """simple docstring"""
    print(F'''Loading flax weights from : {flax_checkpoint_path}''')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params , sep='''/''')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params , sep='''/''')
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params)
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
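# Hypothetical invocation sketch (all paths are placeholders, not part of this script):
# python convert_switch_transformers_checkpoint.py \
#     --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --gin_file /path/to/operative_config.gin \
#     --pytorch_dump_folder_path ./switch-base-8 \
#     --num_experts 8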
| 11 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : Union[str, Any] = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config( PretrainedConfig ):
    """simple docstring"""
    model_type = 'wav2vec2'
    def __init__( self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(1_0, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , mask_feature_min_masks=0 , num_codevectors_per_group=3_2_0 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_0_0 , codevector_dim=2_5_6 , proj_codevector_dim=2_5_6 , diversity_loss_weight=0.1 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_1_2 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , adapter_attn_dim=None , **kwargs , ) -> None:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ) -> int:
        return functools.reduce(operator.mul , self.conv_stride , 1 )
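# Worked example for the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio is 5 * 2**6 = 320, i.e. one encoder
# frame per 320 waveform samples (20 ms of audio at a 16 kHz sampling rate).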
| 53 |
'''simple docstring'''
def decimal_isolate(number , digit_amount):
    """simple docstring"""
    if digit_amount > 0:
        return round(number - int(number) , digit_amount)
    return number - int(number)
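# Illustration: decimal_isolate(35.345, 1) computes round(35.345 - 35, 1) == 0.3
# (subject to the usual float rounding), while a non-positive digit_amount returns
# the raw fractional part unrounded.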
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 11 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 570 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def get_min_hash(tokens):
    """simple docstring"""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """simple docstring"""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    '''simple docstring'''
    def __init__(self , *,
    duplication_jaccard_threshold = 0.85 , ) -> None:
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add(self , code_key , min_hash ) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters(self ) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save(self , filepath ) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , '''w''' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element):
    """simple docstring"""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10_000) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator , jaccard_threshold):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)) , max_queue_size=100)):
        di.add(filename , min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(codea , codeb):
    """simple docstring"""
    tokensa = get_tokens(codea)
    tokensb = get_tokens(codeb)
    return len(tokensa & tokensb) / len(tokensa | tokensb)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold):
    """simple docstring"""
    extremes = []
    for elementa in cluster:
        codea = _shared_dataset[elementa['''base_index''']]['''content''']
        for elementb in extremes:
            codeb = _shared_dataset[elementb['''base_index''']]['''content''']
            if jaccard_similarity(codea , codeb) >= jaccard_threshold:
                elementb["copies"] += 1
                break
        else:
            elementa['''copies'''] = 1
            extremes.append(elementa)
    return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list) , ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset , jaccard_threshold = 0.85):
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold)
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['''is_extreme'''] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']
    print(F'''Original dataset size: {len(dataset)}''')
    print(F'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(F'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(F'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(F'''Filtered dataset size: {len(ds_filter)}''')
    return ds_filter, duplicate_clusters
| 11 | 0 |
from __future__ import annotations
def slowsort( sequence , start = None , end = None ):
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
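# Usage sketch: the sort is in place, e.g. seq = [5, 2, 4, 1]; slowsort(seq) leaves
# seq == [1, 2, 4, 5]. The "multiply and surrender" recursion is deliberately
# inefficient, so keep inputs tiny.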
if __name__ == "__main__":
from doctest import testmod
testmod()
| 500 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
    '''simple docstring'''
    def __init__(self ) -> None:
        """simple docstring"""
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )
    def forward(self , x ) -> torch.Tensor:
        """simple docstring"""
        return self.linearb(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook( ModelHook ):
    '''simple docstring'''
    def pre_forward(self , module , *args , **kwargs ):
        """simple docstring"""
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
    '''simple docstring'''
    def post_forward(self , module , output ):
        """simple docstring"""
        return output + 1
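# Illustration of the two hook points above (assumed behavior of accelerate's ModelHook
# protocol): with PreForwardHook attached, model(x) computes model(x + 1); with
# PostForwardHook attached, it computes model(x) + 1 -- which is what the tests below
# assert via torch.allclose.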
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
self.assertEqual(test_model._hf_hook , A )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
add_hook_to_module(A , A , append=A )
self.assertEqual(isinstance(test_model._hf_hook , A ) , A )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(x + 1 )
_a = test_model(x + 2 )
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , A , atol=1E-5 )
def a__ (self ) -> str:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , output + 2 , atol=1E-5 )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a = True
_a = test_model(A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(A , AlignDevicesHook(io_same_device=A ) )
_a = torch.randn(2 , 3 ).to(0 )
_a = model(A )
self.assertEqual(output.device , torch.device(0 ) )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
_a = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(A , execution_device=A , offload=A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(A , execution_device=A , offload=A , offload_buffers=A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        """Same offload behaviour as above, but the offloaded weights are served from an explicit weights map."""
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True, weights_map=model.state_dict())

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
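# --- Illustrative usage sketch (not part of the test file above) ---
# A minimal, hedged example of the offload pattern these tests exercise, assuming
# `accelerate` is installed. The tiny Sequential model here is a stand-in for the
# test's ModelForTest; nothing below is specific to the original suite.
if __name__ == "__main__":
    import torch
    from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules

    small_model = torch.nn.Sequential(torch.nn.Linear(3, 4), torch.nn.Linear(4, 5))
    attach_align_device_hook(small_model, execution_device="cpu", offload=True, weights_map=small_model.state_dict())
    _ = small_model(torch.randn(2, 3))  # weights are streamed in per submodule at forward time
    remove_hook_from_submodules(small_model)  # restores the real weights onto the model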
| 11 | 0 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
                 intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
                 bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
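# Quick smoke test for the config above (a hedged sketch; it only relies on the
# standard PretrainedConfig to_dict/from_dict round-trip, nothing file-specific):
if __name__ == "__main__":
    cfg = BertGenerationConfig(hidden_size=512, num_hidden_layers=6)
    assert cfg.model_type == "bert-generation"
    assert BertGenerationConfig.from_dict(cfg.to_dict()).hidden_size == 512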
| 36 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires higher tolerance: the model runs in float16 while references were computed in float32.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 11 | 0 |
import random
def random_graph(nodes_number: int, probability: float, directed: bool = False) -> dict:
    """
    Generate a random graph on `nodes_number` nodes where each possible edge is
    added independently with the given probability (Erdos-Renyi style).
    """
    graph: dict = {i: [] for i in range(nodes_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is lower than the given probability
    for i in range(nodes_number):
        for j in range(i + 1, nodes_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Build a complete graph on `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
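# Quick demonstration of the two builders above (deterministic under a fixed seed):
if __name__ == "__main__":
    random.seed(1)
    print(random_graph(4, 0.5))  # a sparse undirected graph on 4 nodes
    print(complete_graph(3))     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}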
| 593 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11,
                 is_training=True):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim,
            state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns. Test is performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
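# --- Illustrative sketch (not part of the original test file) ---
# A minimal, self-contained forward pass for DecisionTransformerModel using a randomly
# initialized model (no checkpoint download); shapes follow the test above.
def _decision_transformer_smoke_test():
    config = DecisionTransformerConfig(state_dim=4, act_dim=2, hidden_size=32, max_length=5)
    model = DecisionTransformerModel(config).eval()
    states = torch.randn(1, 3, config.state_dim)
    actions = torch.zeros(1, 3, config.act_dim)
    rewards = torch.zeros(1, 3)
    returns_to_go = torch.ones(1, 3, 1)
    timesteps = torch.arange(3).reshape(1, 3)
    attention_mask = torch.ones(1, 3, dtype=torch.long)
    with torch.no_grad():
        out = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
    assert out.action_preds.shape == actions.shape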
| 11 | 0 |
"""simple docstring"""
import math
def prime_sieve(n: int) -> list:
    """Return all primes below n, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999966663333) -> int:
    """Sum the matching numbers up to the given limit (Project Euler style)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
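# Small sanity check for the sieve above (cheap to run, unlike the full solution()):
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]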
if __name__ == "__main__":
print(solution()) | 156 |
'''simple docstring'''
from __future__ import annotations
def all_elements_unique(collection: list) -> bool:
    """Return True if every element of the collection is distinct."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
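# Example evaluations of the helper above:
assert all_elements_unique([1, 2, 3]) is True
assert all_elements_unique(list("banana")) is False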
| 11 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning 'Oh I think you're nominated'", said Dappy."And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around."At the end of the day we're grateful to be where we are in our careers."If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" """,
        ]

        tgt_text = [
            """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
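    # For reference, the same summarization flow outside the test harness would look
    # roughly like this (a hedged sketch; downloads the full pegasus-xsum checkpoint):
    #
    #   model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    #   tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    #   batch = tokenizer(["Some long article ..."], return_tensors="np", truncation=True, padding=True)
    #   summary_ids = model.generate(**batch, num_beams=2).sequences
    #   print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))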
| 261 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list, item) -> bool:
    """Return True if item is in the sorted list, using recursive binary search."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
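# Non-interactive self-checks for the search above:
assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False
assert binary_search([], 1) is False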
| 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
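# End-to-end sketch outside the test harness (hedged; requires Pillow and torch,
# and constructs the image processor directly rather than from a checkpoint):
#
#   processor = DPTImageProcessor(size={"height": 18, "width": 18})
#   pixel_values = processor(images=Image.new("RGB", (32, 32)), return_tensors="pt").pixel_values
#   print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])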
| 582 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], inclusive, in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
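# Example session with the class above:
ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10
assert ps.get_sum(1, 2) == 5
assert ps.contains_sum(7) is True   # 3 + 4
assert ps.contains_sum(11) is False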
| 11 | 0 |
def actual_power(a: int, b: int):
    """Compute a**b for b handled by halving the exponent (divide and conquer)."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Compute a**b, supporting negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
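# A few more example evaluations of the helper above:
assert power(2, 10) == 1024
assert power(2, -3) == 0.125
assert power(-2, -3) == -0.125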
| 514 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list:
    """Return the prime factorization of n as a list, e.g. 100 -> [2, 2, 5, 5]."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
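# Example factorizations:
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]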
| 11 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
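# Typical invocation (illustrative; the script name and launcher flags are assumptions,
# and the exact flags depend on your setup):
#   torchrun --nproc_per_node=2 distributed_test_script.py --streaming True --num_workers 2
# Each rank then checks that split_dataset_by_node gave it the expected share of
# NUM_SHARDS * NUM_ITEMS_PER_SHARD examples.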
| 393 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
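# Example key exchange with the class above: both parties derive the same digest.
if __name__ == "__main__":
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b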
| 11 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
_A = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_A = R"""\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ):
        """Decode the best answer spans from the reader's raw span and relevance logits."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """Find the best answer spans for one passage, skipping spans nested inside an already-chosen span."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(_A)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """A "fast" DPR reader tokenizer: a BERT tokenizer extended with the span-decoding helpers above."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
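Note that the span selector above only filters *nested* candidates: a lower-scored span is dropped when it contains, or is contained in, an already-chosen span, while partially overlapping spans are kept. A minimal stand-alone sketch of that logic (the helper name and toy logits are illustrative, not from the file above):

def toy_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    # score every (start, end) pair whose length stays under max_answer_length
    scores = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scores.append(((s, s + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (s, e), _ in scores:
        # skip only spans nested with an already-chosen span
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen

print(toy_best_spans([0.1, 2.0, 0.3], [0.2, 1.5, 2.5]))  # [(1, 2), (0, 1)]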
| 182 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
lowercase_ = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results, replacing any stale checkpoint files already in the directory
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution over the last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
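# For reference: for a distribution p, entropy() above computes the Shannon entropy
#     H(p) = -sum_i p_i * log(p_i)
# over the last axis, treating the 0 * log(0) terms as 0. With unlogit=True (how it
# is called below on the attention maps), p is first replaced by p**2.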
def print_2d_tensor(tensor):
    """Log a 2D tensor, one line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and head importance scores over the evaluation data."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
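# The importance accumulated in compute_heads_importance() follows Michel et al.,
# "Are Sixteen Heads Really Better than One?" (https://arxiv.org/abs/1905.10650):
# with a mask variable xi_h multiplying each head's output, head h's importance is
#     I_h = E_x | dL(x) / d xi_h |
# estimated here by summing |head_mask.grad| over evaluation batches and then
# normalizing by the total token count (and optionally per layer / globally).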
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically remove the masked heads and compare score, parameter count, and timing."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
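A hypothetical invocation of the script (the file name, paths, and checkpoint are placeholders; the script only assumes --data_dir points at a file of token ids that np.loadtxt can parse):

    python run_prune_gpt.py --model_name_or_path gpt2 --data_dir tokens.txt --output_dir out/ --try_masking --masking_threshold 0.9

Since the score used for masking is 1 / LM-loss, higher is better: masking stops as soon as the score falls below masking_threshold times the original score, and prune_heads() then physically removes the masked heads and reports the parameter and speed savings.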
| 11 | 0 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by sweeping alternately right-to-left and left-to-right."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : str = input('Enter numbers separated by a comma:\n').strip()
_snake_case : Optional[int] = [int(item) for item in user_input.split(',')]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 53 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be summed
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
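Worked example (illustrative): multiplicative_persistence(39) is 3, since 3 * 9 = 27, then 2 * 7 = 14, then 1 * 4 = 4 reaches a single digit after three steps; likewise additive_persistence(199) is 3 via 199 -> 19 -> 10 -> 1.

assert multiplicative_persistence(39) == 3
assert additive_persistence(199) == 3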
| 11 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
return 1_0_0
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
a_ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
torch.manual_seed(0 )
a_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
torch.manual_seed(0 )
a_ : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 1_2,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a_ : Optional[int] = PriorTransformer(**SCREAMING_SNAKE_CASE__ )
    # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
a_ : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
torch.manual_seed(0 )
a_ : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
a_ : Optional[Any] = CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE__ )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
a_ : int = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : List[str] = self.dummy_prior
a_ : Dict = self.dummy_image_encoder
a_ : str = self.dummy_text_encoder
a_ : Any = self.dummy_tokenizer
a_ : Optional[Any] = self.dummy_image_processor
a_ : List[str] = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=10.0 , )
a_ : Union[str, Any] = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
a_ : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
a_ : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : Any = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
a_ : Union[str, Any] = 'cpu'
a_ : Optional[Any] = self.get_dummy_components()
a_ : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
a_ : Any = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
a_ : List[Any] = output.image_embeds
a_ : List[Any] = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
a_ : Dict = image[0, -1_0:]
a_ : int = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
a_ : Optional[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
a_ : Union[str, Any] = torch_device == 'cpu'
a_ : Any = True
a_ : Union[str, Any] = False
self._test_inference_batch_single_identical(
test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , )
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
a_ : Tuple = torch_device == 'cpu'
a_ : Dict = False
self._test_attention_slicing_forward_pass(
test_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , )
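For context, the prior pipeline exercised above maps a text prompt to CLIP image embeddings (shape (1, 32) under this tiny test config), which a separate decoder pipeline later renders into pixels; guidance_scale applies classifier-free guidance in that embedding space. A minimal usage sketch, assuming `components` is the component dict assembled in the test above (prior, image_encoder, text_encoder, tokenizer, scheduler, image_processor) - illustrative only:

pipe = KandinskyVaaPriorPipeline(**components)
out = pipe("horse", num_inference_steps=2, guidance_scale=4.0, output_type="np")
image_embeds = out.image_embeds                # consumed by the decoder pipeline
negative_embeds = out.negative_image_embeds    # counterpart used for classifier-free guidance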
| 570 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
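A quick stand-alone illustration of what these tests assert (the size values mirror the tester defaults above):

from PIL import Image
import numpy as np
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))  # arbitrary input resolution
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])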
| 11 | 0 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort numbers by scattering them into one bucket per unit of the value range."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
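With one bucket per unit of the value range, the expected cost is O(n + k) where k = max - min + 1, plus the per-bucket sorts; for evenly spread inputs each bucket holds O(1) items. The `int(i - min_value)` index also works for floats, e.g. (illustrative):

assert bucket_sort([0.4, 0.1, 2.5, 0.3]) == [0.1, 0.3, 0.4, 2.5]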
| 500 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
def __init__(self , A , A=16 , A=13 , A=7 , A=14 , A=10 , A=19 , A=5 , A=4 , A=True , A=16 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=[1, 2, 3, 4, 5] , A=25 , A=5 , ) -> List[str]:
"""simple docstring"""
_a = d_model
_a = parent
_a = batch_size
_a = prediction_length
_a = context_length
_a = cardinality
_a = num_time_features
_a = lags_sequence
_a = embedding_dimension
_a = is_training
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = context_length
_a = prediction_length + label_length
_a = label_length
_a = moving_average
_a = autocorrelation_factor
def a__ (self ) -> Any:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
_a = config.context_length + max(config.lags_sequence )
_a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, _past_length] )
_a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, config.prediction_length] )
_a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.get_config()
_a = self.prepare_autoformer_inputs_dict(A )
return config, inputs_dict
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ (self , A , A ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModel(config=A ).to(A ).eval()
_a = model(**A )
_a = outputs.encoder_last_hidden_state
_a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_encoder()
encoder.save_pretrained(A )
_a = AutoformerEncoder.from_pretrained(A ).to(A )
_a , _a , _a , _a , _a = model.create_network_inputs(**A )
_a , _a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_a = encoder(inputs_embeds=A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_decoder()
decoder.save_pretrained(A )
_a = AutoformerDecoder.from_pretrained(A ).to(A )
_a = decoder(
trend=A , inputs_embeds=A , encoder_hidden_states=A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__lowerCamelCase : Dict = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__lowerCamelCase : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
__lowerCamelCase : Tuple = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = False
__lowerCamelCase : int = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : List[Any] = False
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModelTester(self )
_a = ConfigTester(self , config_class=A , has_text_modality=A )
def a__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a = model_class(A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
_a , _a = model_class.from_pretrained(A , output_loading_info=A )
self.assertEqual(info['''missing_keys'''] , [] )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = inspect.signature(getattr(A , '''forward''' ) )
# The main input is the name of the argument after `self`
_a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(A )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(A )] , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , '''seq_length''' , A )
_a = getattr(self.model_tester , '''decoder_seq_length''' , A )
_a = getattr(self.model_tester , '''encoder_seq_length''' , A )
_a = getattr(self.model_tester , '''d_model''' , A )
_a = getattr(self.model_tester , '''num_attention_heads''' , A )
_a = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_a = len(A )
_a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(A , A )
# decoder attentions
_a = outputs.decoder_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_a = outputs.cross_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 2 , len(A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase (__A="train-batch.pt"):
"""simple docstring"""
_a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__A , repo_type='''dataset''')
_a = torch.load(__A , map_location=__A)
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch()
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
_a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
_a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
_a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A )
_a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=A )
_a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A , rtol=1E-1 ) )
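Autoformer's defining trick, visible in the create_network_inputs/decomposition_layer calls above, is to split each series into a moving-average trend plus a seasonal residual before attention. A minimal sketch of that decomposition (kernel_size plays the role of the moving_average config value; illustrative, not the library implementation):

import torch
import torch.nn.functional as F

def decompose(x, kernel_size=25):
    # x: (batch, time, channels); replicate-pad the ends so the average keeps length T
    pad = (kernel_size - 1) // 2
    front = x[:, :1, :].repeat(1, pad, 1)
    end = x[:, -1:, :].repeat(1, kernel_size - 1 - pad, 1)
    padded = torch.cat([front, x, end], dim=1)
    trend = F.avg_pool1d(padded.permute(0, 2, 1), kernel_size, stride=1).permute(0, 2, 1)
    return x - trend, trend  # (seasonal, trend)

seasonal, trend = decompose(torch.randn(2, 50, 3))
assert seasonal.shape == trend.shape == (2, 50, 3)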
| 11 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=13 ,SCREAMING_SNAKE_CASE_=7 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=99 ,SCREAMING_SNAKE_CASE_=32 ,SCREAMING_SNAKE_CASE_=5 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=37 ,SCREAMING_SNAKE_CASE_="gelu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=16 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=None ,):
'''simple docstring'''
snake_case : str = parent
snake_case : Union[str, Any] = batch_size
snake_case : List[str] = seq_length
snake_case : Optional[Any] = is_training
snake_case : Optional[Any] = use_input_mask
snake_case : Any = use_token_type_ids
snake_case : Optional[Any] = use_labels
snake_case : Tuple = vocab_size
snake_case : List[str] = hidden_size
snake_case : Dict = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Union[str, Any] = intermediate_size
snake_case : Tuple = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : List[str] = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : Optional[int] = type_vocab_size
snake_case : Optional[Any] = type_sequence_label_size
snake_case : List[str] = initializer_range
snake_case : Dict = num_labels
snake_case : str = num_choices
snake_case : Any = scope
    def prepare_config_and_inputs(self):
snake_case : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case : Optional[int] = None
if self.use_input_mask:
snake_case : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Dict = None
if self.use_token_type_ids:
snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case : str = None
snake_case : List[Any] = None
snake_case : List[Any] = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
snake_case : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return OpenLlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=SCREAMING_SNAKE_CASE_ ,initializer_range=self.initializer_range ,use_stable_embedding=SCREAMING_SNAKE_CASE_ ,)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Dict = True
snake_case : Union[str, Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : int = model(
SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=SCREAMING_SNAKE_CASE_ ,encoder_attention_mask=SCREAMING_SNAKE_CASE_ ,)
snake_case : List[str] = model(
SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=SCREAMING_SNAKE_CASE_ ,)
snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Tuple = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Any = model(SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Dict = True
snake_case : Union[str, Any] = True
snake_case : Optional[int] = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# first forward pass
snake_case : Any = model(
SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=SCREAMING_SNAKE_CASE_ ,encoder_attention_mask=SCREAMING_SNAKE_CASE_ ,use_cache=SCREAMING_SNAKE_CASE_ ,)
snake_case : Union[str, Any] = outputs.past_key_values
    # create hypothetical multiple next tokens and extend next_input_ids
snake_case : Dict = ids_tensor((self.batch_size, 3) ,config.vocab_size )
snake_case : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
    # append the new tokens to input_ids and the attention mask
snake_case : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
snake_case : Dict = torch.cat([input_mask, next_mask] ,dim=-1 )
snake_case : Dict = model(
SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=SCREAMING_SNAKE_CASE_ ,encoder_attention_mask=SCREAMING_SNAKE_CASE_ ,output_hidden_states=SCREAMING_SNAKE_CASE_ ,)["""hidden_states"""][0]
snake_case : List[str] = model(
SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=SCREAMING_SNAKE_CASE_ ,encoder_attention_mask=SCREAMING_SNAKE_CASE_ ,past_key_values=SCREAMING_SNAKE_CASE_ ,output_hidden_states=SCREAMING_SNAKE_CASE_ ,)["""hidden_states"""][0]
# select random slice
snake_case : Optional[int] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
snake_case : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
__lowerCamelCase : Optional[int] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowerCamelCase : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = OpenLlamaModelTester(self )
snake_case : Dict = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE_ ,hidden_size=37 )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : Union[str, Any] = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Any = 3
snake_case : str = input_dict["""input_ids"""]
snake_case : int = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
snake_case : int = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case : Dict = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : int = model(SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Union[str, Any] = 3
snake_case : Tuple = """single_label_classification"""
snake_case : Union[str, Any] = input_dict["""input_ids"""]
snake_case : Any = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case : Tuple = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Dict = model(SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : int = 3
snake_case : List[Any] = """multi_label_classification"""
snake_case : Any = input_dict["""input_ids"""]
snake_case : str = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case : Any = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : List[str] = model(SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Any = ids_tensor([1, 10] ,config.vocab_size )
snake_case : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case : Optional[int] = OpenLlamaModel(SCREAMING_SNAKE_CASE_ )
original_model.to(SCREAMING_SNAKE_CASE_ )
original_model.eval()
snake_case : Any = original_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state
snake_case : Tuple = original_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case : Union[str, Any] = {"""type""": scaling_type, """factor""": 10.0}
snake_case : Union[str, Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE_ )
scaled_model.to(SCREAMING_SNAKE_CASE_ )
scaled_model.eval()
snake_case : List[Any] = scaled_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state
snake_case : Dict = scaled_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,atol=1E-5 ) )
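The scaling test above relies on how rotary position embeddings respond to the rope_scaling config: "linear" divides every position index by the factor (so even short inputs change), while "dynamic" NTK scaling only stretches the frequency base once the input exceeds the trained context, leaving short inputs untouched. A minimal sketch of the angles involved (illustrative, not the modeling code):

import torch

def rope_angles(positions, dim=64, base=10000.0, linear_factor=1.0):
    # classic RoPE inverse frequencies; "linear" scaling = dividing positions by the factor
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions.float() / linear_factor, inv_freq)

short = torch.arange(16)
assert not torch.allclose(rope_angles(short), rope_angles(short, linear_factor=10.0))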
| 36 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the
        # original maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
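
# A minimal O(sqrt(n)) variant (our sketch; assumes positive integers):
# pair each divisor d <= sqrt(n) with its cofactor n // d instead of
# scanning every candidate up to n // 2.
def perfect_fast(number: int) -> bool:
    if number < 2:
        return False
    total = 1  # 1 divides everything; the number itself is excluded
    d = 2
    while d * d <= number:
        if number % d == 0:
            total += d
            if d != number // d:
                total += number // d
        d += 1
    return total == number


assert perfect_fast(28) and not perfect_fast(27)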
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
__A = int(input('''Enter number: ''').strip())
print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=4 , ) -> List[str]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_attention_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_choices
def a__ (self ) -> str:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_attention_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the credit card number starts with a valid prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return the validation result for a credit card number."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
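
# Compact reference formulation of the same Luhn step (our sketch, separate
# from `luhn_validation` above): a doubled two-digit product d contributes
# d - 9, which equals the sum of its digits (e.g. 12 -> 1 + 2 = 3 = 12 - 9).
def luhn_ok(number: str) -> bool:
    digits = [int(c) for c in reversed(number)]
    total = sum(digits[0::2])  # digits in odd positions from the right
    for d in digits[1::2]:  # every second digit gets doubled
        d *= 2
        total += d - 9 if d > 9 else d
    return total % 10 == 0


assert luhn_ok("4111111111111111")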
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[str] = cached_file(lowerCamelCase , lowerCamelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCamelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCamelCase , lowerCamelCase ) ) )
with open(os.path.join(lowerCamelCase , '''refs''' , '''main''' ) ) as f:
snake_case__ : Dict = f.read()
self.assertEqual(lowerCamelCase , os.path.join(lowerCamelCase , '''snapshots''' , lowerCamelCase , lowerCamelCase ) )
self.assertTrue(os.path.isfile(lowerCamelCase ) )
# File is cached at the same place the second time.
snake_case__ : Optional[int] = cached_file(lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
# Using a specific revision to test the full commit hash.
snake_case__ : Optional[Any] = cached_file(lowerCamelCase , lowerCamelCase , revision='''9b8c223''' )
self.assertEqual(lowerCamelCase , os.path.join(lowerCamelCase , '''snapshots''' , lowerCamelCase , lowerCamelCase ) )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(lowerCamelCase , '''is not a valid model identifier''' ):
snake_case__ : str = cached_file('''tiny-random-bert''' , lowerCamelCase )
with self.assertRaisesRegex(lowerCamelCase , '''is not a valid git identifier''' ):
snake_case__ : str = cached_file(lowerCamelCase , lowerCamelCase , revision='''aaaa''' )
with self.assertRaisesRegex(lowerCamelCase , '''does not appear to have a file named''' ):
snake_case__ : Union[str, Any] = cached_file(lowerCamelCase , '''conf''' )
def lowercase__ ( self ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(lowerCamelCase , '''does not appear to have a file named''' ):
snake_case__ : List[Any] = cached_file(lowerCamelCase , '''conf''' )
with open(os.path.join(lowerCamelCase , '''refs''' , '''main''' ) ) as f:
snake_case__ : Any = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase , '''.no_exist''' , lowerCamelCase , '''conf''' ) ) )
snake_case__ : int = cached_file(lowerCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=lowerCamelCase )
self.assertIsNone(lowerCamelCase )
snake_case__ : str = cached_file(lowerCamelCase , '''conf''' , local_files_only=lowerCamelCase , _raise_exceptions_for_missing_entries=lowerCamelCase )
self.assertIsNone(lowerCamelCase )
snake_case__ : Any = mock.Mock()
snake_case__ : Optional[int] = 500
snake_case__ : str = {}
snake_case__ : str = HTTPError
snake_case__ : Optional[int] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase ) as mock_head:
snake_case__ : str = cached_file(lowerCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=lowerCamelCase )
self.assertIsNone(lowerCamelCase )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase__ ( self ) -> int:
"""simple docstring"""
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , lowerCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , lowerCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , lowerCamelCase ) )
def lowercase__ ( self ) -> str:
"""simple docstring"""
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCamelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , lowerCamelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCamelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , lowerCamelCase , revision='''ahaha''' )
snake_case__ : Tuple = get_file_from_repo('''bert-base-cased''' , lowerCamelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case__ : Tuple = json.loads(open(lowerCamelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Optional[int] = Path(lowerCamelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(lowerCamelCase , '''a.txt''' ) , str(lowerCamelCase ) )
self.assertIsNone(get_file_from_repo(lowerCamelCase , '''b.txt''' ) )
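
# Usage sketch (ours): `cached_file` resolves a repo file into the local
# Hugging Face cache, whose layout the tests above assert on
# (blobs/ refs/ snapshots/ subfolders):
#
#   from transformers.utils import cached_file
#   path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
#   # -> .../models--hf-internal-testing--tiny-random-bert/snapshots/<commit>/config.json
#
# The first call needs network access; later calls are served from the cache.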
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
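
# What `_LazyModule` buys you, in miniature (our standalone sketch, not the
# transformers implementation): attribute access triggers the real import,
# so importing the package stays cheap until a symbol is actually used.
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module("." + submodule, self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(f"module {self.__name__} has no attribute {attr}")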
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
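
# Usage sketch (ours; the constructor arguments shown are hypothetical):
# `strategy` returns a standard tf.distribute strategy, so ordinary Keras
# model code runs unchanged under its scope:
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], device_idx=0)
#   with args.strategy.scope():
#       model = tf.keras.Sequential([tf.keras.layers.Dense(1)])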
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon search results for `product` into a DataFrame with
    title, link, price, rating, MRP and discount columns."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        float(product_mrp.strip("₹").replace(",", ""))
                        - float(product_price.strip("₹").replace(",", ""))
                    )
                    / float(product_mrp.strip("₹").replace(",", ""))
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # Skip entries that are missing the core fields.
            continue
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    return data_frame
if __name__ == "__main__":
lowercase_ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order at the head so the final list reads ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
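
# `merge_lists` above re-sorts the concatenation, which is
# O((n + m) log(n + m)). A classic two-pointer merge of two already-sorted
# sequences is O(n + m); a minimal sketch on plain lists:
def merge_sorted(a: list[int], b: list[int]) -> list[int]:
    out: list[int] = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    return out + a[i:] + b[j:]


assert merge_sorted([1, 3, 5], [2, 4]) == [1, 2, 3, 4, 5]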
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Convert a PIL image (or list of them) into a [-1, 1] torch tensor."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between v0 and v1 at fraction t."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Vectors are nearly parallel; fall back to linear interpolation.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    """Squared geodesic distance between x and y on the unit hypersphere."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
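
# Quick numeric check for `slerp` (our sketch): interpolating halfway between
# orthogonal unit vectors should land on the 45-degree point of the arc.
if __debug__:
    _va = np.array([1.0, 0.0])
    _vb = np.array([0.0, 1.0])
    assert np.allclose(slerp(0.5, _va, _vb), np.sqrt(0.5))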
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self, style_image, content_image, style_prompt=None, content_prompt=None, height=512, width=512, noise_strength=0.6, num_inference_steps=50, guidance_scale=7.5, batch_size=1, eta=0.0, clip_guidance_scale=100, generator=None, output_type="pil", return_dict=True, slerp_latent_style_strength=0.8, slerp_prompt_style_strength=0.1, slerp_clip_image_style_strength=0.1,
    ):
"""simple docstring"""
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
from __future__ import annotations
def is_9_pandigital(nums: int) -> bool:
    """Check whether `nums` uses each of the digits 1-9 exactly once."""
    number = str(nums)
    return len(number) == 9 and set(number) == set("123456789")


def solution() -> int | None:
    """Return the largest 1-9 pandigital number that can be formed by
    concatenating an integer with its first multiples, or None."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None


if __name__ == "__main__":
    print(f"{solution() = }")
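
# Worked example (assuming this is Project Euler 38): 9327 x 1 = 9327 and
# 9327 x 2 = 18654 concatenate to 932718654, which is 1-9 pandigital. The
# factor 100002 encodes exactly that concatenation: shift by 10^5, add 2x.
assert 100002 * 9327 == int(str(9327) + str(9327 * 2))
assert is_9_pandigital(100002 * 9327)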
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
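
# How merges like the ones above tokenize "adapt", in miniature (our sketch,
# not the CTRL implementation): greedily apply the lowest-ranked merge to
# adjacent symbol pairs until no listed merge applies.
def apply_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while len(symbols) > 1:
        pairs = [(ranks.get(p, float("inf")), i) for i, p in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs)
        if best_rank == float("inf"):
            break
        symbols[i : i + 2] = ["".join(symbols[i : i + 2])]
    return symbols


assert apply_bpe("adapt", ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]) == ["adapt</w>"]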
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator up to `digit` for which numerator / denominator
    has the longest recurring decimal cycle, by tracking remainders of the
    long division until one repeats."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
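
# Sanity check (ours): 1/7 = 0.(142857) has the longest recurring cycle among
# denominators up to 10, so the search should pick 7.
assert solution(1, 10) == 7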
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase_ = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                expert_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[expert_key] = expert_weights[idx]
                print(f"{key} -> {expert_key}")
            s_dict.pop(key)
    return s_dict
lowercase_ = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Convert a Google-style gin config into a SwitchTransformersConfig."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
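
# Minimal illustration (ours) of the gin-parsing regex used in
# `convert_gin_to_config`, on a hypothetical two-line gin snippet:
#
#   re.findall(r"(.*) = ([0-9.]*)", "NUM_HEADS = 8\nMLP_DIM = 2048")
#   -> [("NUM_HEADS", "8"), ("MLP_DIM", "2048")]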
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : List[Any]=7 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=9_9 , lowerCAmelCase_ : Tuple=3_6 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Optional[int]=3_7 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Optional[Any]=5_1_2 , lowerCAmelCase_ : str=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : Optional[Any]=6 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=1_0_0_0 , ) -> List[str]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = text_seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = coordinate_size
__lowerCAmelCase = shape_size
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase = text_seq_length
__lowerCAmelCase = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase = self.text_seq_length + self.image_seq_length
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase = bbox[i, j, 3]
__lowerCAmelCase = bbox[i, j, 1]
__lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase = bbox[i, j, 2]
__lowerCAmelCase = bbox[i, j, 0]
__lowerCAmelCase = t
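                # The two swaps above enforce x0 <= x1 and y0 <= y1, so each
                # randomly drawn box stays a valid (top-left, bottom-right) pair.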
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> List[str]:
__lowerCAmelCase = LayoutLMvaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# text + image
__lowerCAmelCase = model(lowerCAmelCase_ , pixel_values=lowerCAmelCase_ )
__lowerCAmelCase = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCAmelCase = model(pixel_values=lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the last hidden states
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of `number`. If `digit_amount` > 0, round the
    decimal part to that many digits; otherwise return it in full.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` via the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start]:
            prime.append(start)

            # Mark multiples of start as composite
            for i in range(start * start, num + 1, start):
                if sieve[i]:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j]:
            prime.append(j)

    return prime
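

# A small self-check, added here as an illustration (not part of the original
# module): cross-validate the sieve against naive trial division for n <= 30.
def _is_prime_naive(n: int) -> bool:
    """Trial division up to sqrt(n); used only to double-check prime_sieve."""
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, int(math.sqrt(n)) + 1))


assert prime_sieve(30) == [n for n in range(2, 31) if _is_prime_naive(n)]
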
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens, or None if too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split code on non-alphanumeric characters and return the token set."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the LSH index and record any near-duplicate cluster it joins."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dicts
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset, in two steps (MinHash + LSH index)."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of two code snippets' token sets: |A & B| / |A | B|."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
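

# Worked example, added as an illustration (not part of the original script):
# "a b c" and "b c d" share 2 tokens out of 4 in the union, so their Jaccard
# similarity is 2 / 4 = 0.5.
assert jaccard_similarity("a b c", "b c d") == 0.5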
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-duplicates in a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Remove near-duplicates from `dataset`, keeping one extreme per cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
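

# Hedged usage sketch, added for illustration (not part of the original
# script). The column names ("content", "repo_name", "path") follow the schema
# the functions above already assume; the toy snippets are made long enough to
# pass the MIN_NUM_TOKENS filter.
if __name__ == "__main__":
    toy = Dataset.from_dict(
        {
            "content": ["def add_one(value):\n    result = value + 1\n    print(result)\n    return result"] * 3
            + ["print('hello world')"],
            "repo_name": ["repo_a", "repo_b", "repo_c", "repo_d"],
            "path": ["a.py", "b.py", "c.py", "d.py"],
        }
    )
    deduped, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(deduped, clusters)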
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 86: find the least M such that the number of cuboids with an
    integer shortest surface path and longest side at most M exceeds `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
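

# Worked check, added as an illustration (not part of the original module):
# for a cuboid with longest side M and shorter sides summing to s = a + b, the
# shortest surface path is sqrt(s**2 + M**2). With M = 6, s = 8 this gives
# sqrt(100) = 10, an integer, and min(6, 8 // 2) - max(1, 8 - 6) + 1 = 3 such
# (a, b) pairs: (2, 6), (3, 5) and (4, 4).
assert sqrt(8**2 + 6**2).is_integer()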
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'''
_DESCRIPTION = '''\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric(\"mse\")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1. ])}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49_408,
        hidden_size=512,
        intermediate_size=2_048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49_406,
        eos_token_id=49_407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """Performs autoregressive prediction of state, action and return for two steps."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
assert column_title.isupper()
_lowerCAmelCase = 0
_lowerCAmelCase = len(__A ) - 1
_lowerCAmelCase = 0
while index >= 0:
_lowerCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , __A )
answer += value
power += 1
index -= 1
return answer
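

# Worked example, added as an illustration (not part of the original module):
# "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28.
assert excel_title_to_column("AB") == 28
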
if __name__ == "__main__":
from doctest import testmod
    testmod()
from __future__ import annotations


def all_unique(collection: list) -> bool:
    """Return True if every element of the collection is distinct."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph.
            m_edges - the list of edges.
            m_component - the dictionary which stores the index of the
            component which a node belongs to.
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component a node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates a new component index throughout a component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges two components, attaching the smaller one to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
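

# Hedged usage sketch, added as an illustration (not part of the original
# module): Borůvka's MST on a small 4-node graph; the expected MST picks the
# edges of weight 4, 5 and 10 for a total weight of 19.
if __name__ == "__main__":
    g = Graph(4)
    for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
        g.add_edge(u, v, w)
    g.boruvka()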
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Recursive binary search on a sorted list; returns True if item is found."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by comma:\n").strip()
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
lowercase_ = int(input("Enter the number to be found in the list:\n").strip())
lowercase_ = "" if binary_search(sequence, target) else "not "
print(F"""{target} was {not_str}found in {sequence}""")
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (numpy or torch) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
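

# Minimal demo, added as an illustration (not part of the original pipeline):
# slerp between two orthogonal unit vectors stays on the unit sphere, while
# plain linear interpolation at t = 0.5 would shrink the norm to ~0.707.
_v0 = np.array([1.0, 0.0])
_v1 = np.array([0.0, 1.0])
assert np.isclose(np.linalg.norm(slerp(0.5, _v0, _v1)), 1.0)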
def spherical_dist_loss(x, y):
    # geodesic distance on the unit sphere: 2 * arcsin(||x - y|| / 2), squared
    # and scaled, computed after projecting both inputs onto the unit sphere
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base does not expose `self.vae.config.scaling_factor`
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        """Caption an image with the CoCa model, used when no prompt is supplied."""
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base does not expose `self.vae.config.scaling_factor`
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
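`cond_fn` relies on `spherical_dist_loss`, and `__call__` below on `slerp` and `preprocess`; none of them is defined in this excerpt. A minimal sketch of common definitions for the first two, under the assumption that both operate on batches of embedding vectors; the helpers actually shipped with this pipeline may differ in detail:

import torch


def spherical_dist_loss(x, y):
    # Squared geodesic distance between L2-normalised embeddings on the unit sphere.
    x = torch.nn.functional.normalize(x, dim=-1)
    y = torch.nn.functional.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def slerp(t, v0, v1, dot_threshold=0.9995):
    # Spherical linear interpolation; falls back to plain lerp for near-colinear inputs.
    dot = torch.sum(v0 * v1 / (v0.norm() * v1.norm()))
    if torch.abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1
    theta_0 = torch.acos(dot)
    sin_theta_0 = torch.sin(theta_0)
    theta_t = theta_0 * t
    return (torch.sin(theta_0 - theta_t) / sin_theta_0) * v0 + (torch.sin(theta_t) / sin_theta_0) * v1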
    @torch.no_grad()
    def __call__(
        self,
        style_image,
        content_image,
        style_prompt=None,
        content_prompt=None,
        height=512,
        width=512,
        noise_strength=0.6,
        num_inference_steps=50,
        guidance_scale=7.5,
        batch_size=1,
        eta=0.0,
        clip_guidance_scale=100,
        generator=None,
        output_type="pil",
        return_dict=True,
        slerp_latent_style_strength=0.8,
        slerp_prompt_style_strength=0.1,
        slerp_clip_image_style_strength=0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base does not expose `self.vae.config.scaling_factor`
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
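A hedged usage sketch for the pipeline above. The checkpoint ids and the community-pipeline identifier are assumptions for illustration, not something this file defines:

import torch
from PIL import Image
from diffusers import DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel

# Assumed checkpoints; any CLIP model / SD base pair with matching sizes should work.
clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16)
feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-large-patch14")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_images_mixing_stable_diffusion",  # assumed pipeline id
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

result = pipe(
    style_image=Image.open("style.png").convert("RGB"),
    content_image=Image.open("content.png").convert("RGB"),
    style_prompt="an oil painting",    # passing prompts skips the CoCa captioning path
    content_prompt="a mountain lake",
    num_inference_steps=50,
    clip_guidance_scale=100,
)
result.images[0].save("mixed.png")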
"""Prefix sum data structure for O(1) range-sum queries."""


class PrefixSum:
    def __init__(self, array) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end) -> int:
        """Return the sum of array[start:end + 1] in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum) -> bool:
        """Return True if some contiguous subarray sums to `target_sum`."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
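A short demonstration of the class above:

ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10  # 1 + 2 + 3 + 4
assert ps.get_sum(1, 2) == 5   # 2 + 3
assert ps.contains_sum(7)      # the subarray [3, 4] sums to 7
assert not ps.contains_sum(11)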
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens):
    """Compute the MinHash of a code snippet, or None if it has fewer than MIN_NUM_TOKENS tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    """Tokenize a code snippet on every non-alphanumeric character."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the LSH index and attach it to an existing cluster of near-duplicates."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters: compute a MinHash per snippet, then feed them into a DuplicationIndex."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files in a cluster,
    counting how many copies each representative stands for."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate the dataset; return the filtered dataset plus the annotated duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element

    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
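An end-to-end sketch of how `deduplicate_dataset` is typically driven. The dataset id is an assumption; any dataset with `content`, `repo_name`, and `path` columns works:

from datasets import load_dataset

ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")  # assumed dataset id
ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
print(f"kept {len(ds_dedup)} of {len(ds)} files")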
"""Compute the prime factorisation of an integer."""
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
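A quick sanity check: multiplying the returned factors back together reconstructs the input.

from math import prod

for n in (2, 97, 360, 2023):
    assert prod(prime_factors(n)) == n  # e.g. 360 -> [2, 2, 2, 3, 3, 5]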
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
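Outside the test harness, the same converter can be driven directly. A minimal sketch; it still requires the Tatoeba-Challenge checkout that `DEFAULT_REPO` points to:

converter = TatoebaConverter(save_dir="converted_models")
converter.convert_models(["heb-eng"])  # writes a PyTorch Marian checkpoint for Hebrew -> English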
"""Diffie-Hellman key exchange over the RFC 3526 MODP groups."""
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over one of the MODP groups defined above."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
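A round-trip demonstration of the class above: two parties derive the same shared secret from each other's public keys, and the static variant agrees with the instance method.

alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)

alice_public = alice.generate_public_key()
bob_public = bob.generate_public_key()

assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)
assert DiffieHellman.generate_shared_key_static(
    alice.get_private_key(), bob_public
) == alice.generate_shared_key(bob_public)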
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_A = logging.get_logger(__name__)
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'upernet'
def __init__(self , _lowerCamelCase=None , _lowerCamelCase=512 , _lowerCamelCase=0.02 , _lowerCamelCase=[1, 2, 3, 6] , _lowerCamelCase=True , _lowerCamelCase=0.4 , _lowerCamelCase=384 , _lowerCamelCase=256 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=255 , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCAmelCase__ : Tuple = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : Any = backbone_config.get("""model_type""" )
UpperCAmelCase__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : List[Any] = config_class.from_dict(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = backbone_config
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : List[str] = pool_scales
UpperCAmelCase__ : List[Any] = use_auxiliary_head
UpperCAmelCase__ : Tuple = auxiliary_loss_weight
UpperCAmelCase__ : Dict = auxiliary_in_channels
UpperCAmelCase__ : Union[str, Any] = auxiliary_channels
UpperCAmelCase__ : Optional[Any] = auxiliary_num_convs
UpperCAmelCase__ : List[Any] = auxiliary_concat_input
UpperCAmelCase__ : Any = loss_ignore_index
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ : Optional[int] = self.backbone_config.to_dict()
UpperCAmelCase__ : Optional[int] = self.__class__.model_type
return output
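A minimal usage sketch, assuming the class is exported as `transformers.UperNetConfig`:

from transformers import UperNetConfig

# With no backbone_config, the default ResNet backbone is plugged in (see the logger.info branch).
config = UperNetConfig()
print(config.backbone_config.model_type)  # "resnet"

# to_dict() nests the backbone config, so a config round-trips through a plain dict.
restored = UperNetConfig(backbone_config=config.to_dict()["backbone_config"])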
"""Prune the attention heads of a GPT-2 model, following the bertology approach
(Michel et al., http://arxiv.org/abs/1905.10650)."""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute the attention entropy of each head and the head importance scores
    according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) until the LM score drops below
    `masking_threshold` times the original score.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked head weights) and compare score
    and inference speed before and after.
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
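The script is meant to be launched from the command line. A hypothetical invocation; the script filename and all paths are placeholders:

# python run_prune_gpt.py \
#     --model_name_or_path gpt2 \
#     --data_dir data/tokenized_ids.txt \
#     --output_dir output/pruned \
#     --try_masking --masking_threshold 0.9 --masking_amount 0.1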
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 10 -> "0b1010"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
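A few spot checks for the converter above:

assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(10) == "0b1010"
assert decimal_to_binary(-5) == "-0b101"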
"""Multiplicative and additive persistence of an integer."""


def multiplicative_persistence(num: int) -> int:
    """Return the number of times the digits of `num` must be multiplied
    together before a single digit remains.
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the number of times the digits of `num` must be summed
    before a single digit remains.
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
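A worked example: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, i.e. three multiplicative steps; additively 39 -> 3+9 = 12 -> 1+2 = 3, i.e. two steps.

assert multiplicative_persistence(39) == 3
assert additive_persistence(39) == 2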
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : str = None
snake_case__ : Optional[Any] = None
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
return self.feat_extract_tester.prepare_feat_extract_dict()
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'feature_size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'sampling_rate' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'padding_value' ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
a_ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
a_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
a_ : int = feat_extract.model_input_names[0]
a_ : str = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) for x, y in zip(SCREAMING_SNAKE_CASE__ , processed_features[input_name] ) ) )
a_ : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
a_ : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a_ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
a_ : str = feat_extract.model_input_names[0]
a_ : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
a_ : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a_ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
a_ : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE__ )
a_ : str = self.feature_extraction_class(**self.feat_extract_dict )
a_ : Any = feat_extract.model_input_names[0]
a_ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
a_ : List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a_ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> Any:
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE__ : Optional[int] ):
a_ : Dict = len(input[0] )
for input_slice in input[1:]:
if len(SCREAMING_SNAKE_CASE__ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
return False
for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE__ ) , np.asarray(SCREAMING_SNAKE_CASE__ ) , atol=1E-3 ):
return False
return True
a_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
a_ : int = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = feat_extract.model_input_names[0]
a_ : List[Any] = BatchFeature({input_name: speech_inputs} )
a_ : Optional[Any] = self.feat_extract_tester.seq_length_diff
a_ : Union[str, Any] = self.feat_extract_tester.max_seq_length + pad_diff
a_ : List[str] = self.feat_extract_tester.min_seq_length
a_ : Union[str, Any] = self.feat_extract_tester.batch_size
a_ : Optional[int] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
a_ : Optional[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = input_a[input_name]
a_ : int = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding='longest' )
a_ : str = input_a[input_name]
a_ : Optional[int] = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
a_ : List[Any] = input_a[input_name]
a_ : Tuple = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='np' )
a_ : Union[str, Any] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' )[input_name]
a_ : List[str] = feat_extract.pad(
SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
a_ : Optional[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
a_ : Optional[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=1_0 )
a_ : int = input_a[input_name]
a_ : Tuple = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , pad_to_multiple_of=1_0 )
a_ : Tuple = input_a[input_name]
a_ : Optional[int] = feat_extract.pad(
SCREAMING_SNAKE_CASE__ , padding='max_length' , pad_to_multiple_of=1_0 , max_length=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = input_a[input_name]
a_ : Union[str, Any] = feat_extract.pad(
SCREAMING_SNAKE_CASE__ , padding='max_length' , pad_to_multiple_of=1_0 , max_length=SCREAMING_SNAKE_CASE__ , return_tensors='np' , )
a_ : Any = input_a[input_name]
self.assertTrue(all(len(SCREAMING_SNAKE_CASE__ ) % 1_0 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
a_ : int = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(SCREAMING_SNAKE_CASE__ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
a_ : int = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : str=False ) -> List[Any]:
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE__ : List[Any] ):
a_ : int = len(input[0] )
for input_slice in input[1:]:
if len(SCREAMING_SNAKE_CASE__ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
return False
for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE__ ) , np.asarray(SCREAMING_SNAKE_CASE__ ) , atol=1E-3 ):
return False
return True
a_ : int = self.feature_extraction_class(**self.feat_extract_dict )
a_ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE__ )
a_ : Any = feat_extract.model_input_names[0]
a_ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
a_ : Union[str, Any] = feat_extract.pad(
SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=SCREAMING_SNAKE_CASE__ )
a_ : int = input_a[input_name]
a_ : Any = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=len(speech_inputs[0] ) )
a_ : str = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE__ ) )
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE__ ) )
# truncate to smallest with np
        input_1 = feat_extract.pad(
            SCREAMING_SNAKE_CASE__="", # placeholder removed below
        )
        # truncate to smallest with np
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(input_1.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to middle
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np"
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_3 = input_3[input_name]

        self.assertTrue(input_1.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_are_equal(input_1, input_2))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_3))
        self.assertTrue(len(input_3[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_2 = input_2[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_1[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
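
# Minimal standalone sketch (assumes Wav2Vec2FeatureExtractor as a concrete
# SequenceFeatureExtractor; not part of the mixin above). With truncation=True and
# padding="max_length", every sequence is cut or padded to exactly max_length, so a dense
# numpy array can come back; without truncation, longer sequences survive and the batch
# stays ragged.
import numpy as np
from transformers import BatchFeature, Wav2Vec2FeatureExtractor

demo_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
demo_inputs = [np.random.rand(n).astype(np.float32) for n in (800, 1000, 1200)]
demo_batch = BatchFeature({"input_values": demo_inputs})

truncated = demo_extractor.pad(demo_batch, padding="max_length", max_length=800, truncation=True, return_tensors="np")
assert truncated["input_values"].shape == (3, 800)  # everything cut down to max_length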
| 570 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
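
# Illustrative usage sketch (requires the torch and vision extras; run outside the test
# harness). Whatever the input resolution, the processor resizes to `size` and returns a 4D
# pixel_values batch, which is exactly what the shape assertions above verify.
import numpy as np
from transformers import DPTImageProcessor

demo_processor = DPTImageProcessor(size={"height": 18, "width": 18})
demo_images = [np.random.randint(0, 256, (3, 32, 48), dtype=np.uint8) for _ in range(2)]
print(demo_processor(demo_images, return_tensors="pt").pixel_values.shape)  # torch.Size([2, 3, 18, 18])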
| 11 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find instances where a non-binary file is opened without an explicit UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        r"""Find print statements, ignoring commented-out and quoted occurrences."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 500 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase (__A="train-batch.pt"):
"""simple docstring"""
_a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__A , repo_type='''dataset''')
_a = torch.load(__A , map_location=__A)
return batch
@require_torch
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch()
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
_a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
_a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
_a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A )
_a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=A )
_a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A , rtol=1E-1 ) )
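
# Standalone sketch (assumes transformers is importable) of the window arithmetic the tester
# above relies on: past tensors must span context_length + max(lags_sequence) time steps so
# every lagged copy of the series can be sliced out of the history.
from transformers import AutoformerConfig

demo_config = AutoformerConfig(prediction_length=10, context_length=14, lags_sequence=[1, 2, 3, 4, 5])
print(demo_config.context_length + max(demo_config.lags_sequence))  # 19 past time steps required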
| 11 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
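
# Sketch of the lazy-import idea behind `_LazyModule`, using only the stdlib module
# `__getattr__` hook (PEP 562). This is an illustrative stand-in for a scratch module,
# not transformers' actual implementation.
import importlib

_lazy_symbols = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

def __getattr__(name):
    for submodule, symbols in _lazy_symbols.items():
        if name in symbols:
            module = importlib.import_module("." + submodule, __package__)
            return getattr(module, name)  # imported only on first attribute access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")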
| 36 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = OpenLlamaModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Any:
"""simple docstring"""
_a = True
_a = OpenLlamaModel(A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , )
_a = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = True
_a = True
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
def a__ (self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''single_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''multi_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__ (self , A ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 10] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
_a = original_model(A ).last_hidden_state
_a = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
_a = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
_a = scaled_model(A ).last_hidden_state
_a = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
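
# Hedged sketch of the knob the parameterized test sweeps: RoPE scaling is configured as a
# plain dict on the config before the model is built (field name as used in the test above;
# support for it on OpenLlamaConfig is assumed here).
from transformers import OpenLlamaConfig

demo_config = OpenLlamaConfig(max_position_embeddings=512)
demo_config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", "factor": 10.0}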
| 11 | 0 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of k consecutive elements in the array."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window: drop the leftmost element, add the next one on the right
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
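    # Worked example of the sliding-window recurrence: each step drops array[i] and adds
    # array[i + k], so the scan is O(n) rather than O(n * k).
    # windows of [1, 2, 3, 4, 5] with k=2: 1+2=3, 2+3=5, 3+4=7, 4+5=9 -> max is 9
    assert max_sum_in_array([1, 2, 3, 4, 5], 2) == 9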
| 593 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
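
# Minimal usage sketch of the integration check above (downloads the checkpoint, so network
# access is required): run the pretrained Flax model on a toy batch and inspect the shape.
import numpy as np
from transformers import FlaxAlbertModel

demo_model = FlaxAlbertModel.from_pretrained("albert-base-v2")
demo_ids = np.array([[0, 345, 232, 2]])
print(demo_model(demo_ids, attention_mask=np.ones_like(demo_ids))[0].shape)  # (1, 4, 768)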
| 11 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=4 , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_attention_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_choices
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_attention_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
def A__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase = model_class_name.from_pretrained("""albert-base-v2""" )
_lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
_lowerCAmelCase = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_lowerCAmelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
_lowerCAmelCase = (1, 11, 768)
self.assertEqual(output.shape , lowerCamelCase )
_lowerCAmelCase = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1e-4 ) )
| 156 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform the Luhn validation on the given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print whether the given credit card number is valid and return the result."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCAmelCase : List[Any] = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
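
# The try/except gate above only advertises backend-specific classes when the dependency
# imports cleanly. A stripped-down sketch of the same pattern (hypothetical variable name,
# stdlib only):
import importlib.util

optional_structure = {"configuration": ["SpeechEncoderDecoderConfig"]}
if importlib.util.find_spec("torch") is not None:
    optional_structure["modeling"] = ["SpeechEncoderDecoderModel"]  # exposed only if torch is installed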
| 261 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
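
# Standalone check of the query-padding step in __call__ above: each sample in a nested batch
# of text queries is right-padded with " " up to the longest sample, so the tokenizer sees a
# rectangular batch.
demo_text = [["a photo of a cat"], ["a photo of a cat", "a photo of a dog"]]
max_num_queries = max(len(t) for t in demo_text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in demo_text]
assert all(len(t) == max_num_queries for t in padded)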
| 582 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase (__A = "laptop"):
"""simple docstring"""
_a = F'''https://www.amazon.in/laptop/s?k={product}'''
_a = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_a = BeautifulSoup(requests.get(__A , headers=__A).text)
# Initialize a Pandas dataframe with the column titles
_a = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
])
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
try:
_a = item.ha.text
_a = '''https://www.amazon.in/''' + item.ha.a['''href''']
_a = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
try:
_a = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
except AttributeError:
_a = '''Not available'''
try:
_a = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
)
except AttributeError:
_a = ''''''
try:
_a = float(
(
(
float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
- float(product_price.strip('''₹''').replace(''',''' , ''''''))
)
/ float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
)
* 100)
except ValueError:
_a = float('''nan''')
except AttributeError:
pass
_a = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_a = ''' '''
_a = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase_ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
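    # Standalone check of the discount formula used above: (MRP - price) / MRP * 100, after
    # stripping the rupee sign and thousands separators.
    demo_mrp, demo_price = "₹1,500", "₹1,200"
    mrp = float(demo_mrp.strip("₹").replace(",", ""))
    price = float(demo_price.strip("₹").replace(",", ""))
    assert round((mrp - price) / mrp * 100) == 20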
| 11 | 0 |
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowerCAmelCase__ = get_logger()
lowerCAmelCase__ = None
class a__ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self , features=None , device=None , **jnp_array_kwargs ) -> List[str]:
'''simple docstring'''
super().__init__(features=features )
import jax
from jaxlib.xla_client import Device
if isinstance(device , Device ):
raise ValueError(
F'Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
F'device: {str(jax.devices()[0] )}.' )
self.device = str(jax.devices()[0] )
self.jnp_array_kwargs = jnp_array_kwargs
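# Note (illustrative): the formatter is constructed with a *string* device identifier,
# e.g. JaxFormatter(device=str(jax.devices()[0])), never with the Device object itself.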
@staticmethod
def _map_devices_to_str( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(device ): device for device in jax.devices()}
def _consolidate(self , column ) -> int:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(column , list ) and column:
if all(
isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(column , axis=0 )
return column
def _tensorize(self , value ) -> str:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(value , (str, bytes, type(None)) ):
return value
elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
default_dtype = {}
if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value , PIL.Image.Image ):
value = np.asarray(value )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
def _recursive_tensorize(self , data_struct ) -> Optional[int]:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , jax.Array ):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct , np.ndarray ):
if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(data_struct , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(data_struct )
def recursive_tensorize(self , data_struct ) -> Dict:
'''simple docstring'''
return map_nested(self._recursive_tensorize , data_struct , map_list=False )
def format_row(self , pa_table ) -> Mapping:
'''simple docstring'''
row = self.numpy_arrow_extractor().extract_row(pa_table )
row = self.python_features_decoder.decode_row(row )
return self.recursive_tensorize(row )
def format_column(self , pa_table ) -> "jax.Array":
'''simple docstring'''
column = self.numpy_arrow_extractor().extract_column(pa_table )
column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
column = self.recursive_tensorize(column )
column = self._consolidate(column )
return column
def format_batch(self , pa_table ) -> Mapping:
'''simple docstring'''
batch = self.numpy_arrow_extractor().extract_batch(pa_table )
batch = self.python_features_decoder.decode_batch(batch )
batch = self.recursive_tensorize(batch )
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name] )
return batch
| 514 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image , w , h):
"""simple docstring"""
if isinstance(image , torch.Tensor):
return image
elif isinstance(image , PIL.Image.Image):
image = [image]
if isinstance(image[0] , PIL.Image.Image):
image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
image = np.concatenate(image , axis=0)
image = np.array(image).astype(np.float32) / 255.0
image = image.transpose(0 , 3 , 1 , 2)
image = 2.0 * image - 1.0
image = torch.from_numpy(image)
elif isinstance(image[0] , torch.Tensor):
image = torch.cat(image , dim=0)
return image
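# Spherical linear interpolation (slerp) between two embeddings; falls back to
# plain linear interpolation when the vectors are nearly parallel
# (|dot| > DOT_THRESHOLD), where the geodesic formula is numerically unstable.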
def slerp(t , v0 , v1 , DOT_THRESHOLD=0.9995):
"""simple docstring"""
inputs_are_torch = False
if not isinstance(v0 , np.ndarray):
inputs_are_torch = True
input_device = v0.device
v0 = v0.cpu().numpy()
v1 = v1.cpu().numpy()
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
if np.abs(dot) > DOT_THRESHOLD:
v2 = (1 - t) * v0 + t * v1
else:
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
theta_t = theta_0 * t
sin_theta_t = np.sin(theta_t)
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
v2 = s0 * v0 + s1 * v1
if inputs_are_torch:
v2 = torch.from_numpy(v2).to(input_device)
return v2
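# Squared spherical (geodesic) distance between L2-normalized embeddings, used
# below as the CLIP-guidance loss: for unit vectors, arcsin(|x - y| / 2) is half
# the angle between them.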
def spherical_dist_loss(x , y):
"""simple docstring"""
x = F.normalize(x , dim=-1)
y = F.normalize(y , dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model , value):
"""simple docstring"""
for param in model.parameters():
param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion( DiffusionPipeline ):
'''simple docstring'''
def __init__(self , vae , text_encoder , clip_model , tokenizer , unet , scheduler , feature_extractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
self.feature_extractor_size = (
feature_extractor.size
if isinstance(feature_extractor.size , int )
else feature_extractor.size['''shortest_edge''']
)
self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , False )
set_requires_grad(self.clip_model , False )
def a__ (self , A = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def disable_attention_slicing(self ) -> Optional[Any]:
"""simple docstring"""
self.enable_attention_slicing(None )
def freeze_vae(self ) -> int:
"""simple docstring"""
set_requires_grad(self.vae , False )
def unfreeze_vae(self ) -> Union[str, Any]:
"""simple docstring"""
set_requires_grad(self.vae , True )
def freeze_unet(self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.unet , False )
def unfreeze_unet(self ) -> str:
"""simple docstring"""
set_requires_grad(self.unet , True )
def get_timesteps(self , num_inference_steps , strength , device ) -> Optional[Any]:
"""simple docstring"""
init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
t_start = max(num_inference_steps - init_timestep , 0 )
timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def prepare_latents(self , image , timestep , batch_size , dtype , device , generator=None ) -> List[str]:
"""simple docstring"""
if not isinstance(image , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(image )}''' )
image = image.to(device=device , dtype=dtype )
if isinstance(generator , list ):
init_latents = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
]
init_latents = torch.cat(init_latents , dim=0 )
else:
init_latents = self.vae.encode(image ).latent_dist.sample(generator )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
init_latents = 0.18215 * init_latents
init_latents = init_latents.repeat_interleave(batch_size , dim=0 )
noise = randn_tensor(init_latents.shape , generator=generator , device=device , dtype=dtype )
# get latents
init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
latents = init_latents
return latents
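# Caption an image with the CoCa model; used below to synthesize a prompt when
# the caller does not supply one.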
def get_image_description(self , image ) -> Tuple:
"""simple docstring"""
transformed_image = self.coca_transform(image ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
generated = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def get_clip_image_embeddings(self , image , batch_size ) -> List[Any]:
"""simple docstring"""
clip_image_input = self.feature_extractor.preprocess(image )
clip_image_features = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size , dim=0 )
return image_embeddings_clip
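# CLIP-guidance step: estimate x_0 from the current noise prediction, decode it,
# embed it with CLIP, and nudge the noise prediction along the gradient of the
# spherical distance to the target embedding (scaled by clip_guidance_scale).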
@torch.enable_grad()
def cond_fn(self , latents , timestep , index , text_embeddings , noise_pred_original , text_embeddings_clip , clip_guidance_scale , ) -> Union[str, Any]:
"""simple docstring"""
latents = latents.detach().requires_grad_()
latent_model_input = self.scheduler.scale_model_input(latents , timestep )
# predict the noise residual
noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
beta_prod_t = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
fac = torch.sqrt(beta_prod_t )
sample = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , LMSDiscreteScheduler ):
sigma = self.scheduler.sigmas[index]
sample = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
sample = 1 / 0.18215 * sample
image = self.vae.decode(sample ).sample
image = (image / 2 + 0.5).clamp(0 , 1 )
image = transforms.Resize(self.feature_extractor_size )(image )
image = self.normalize(image ).to(latents.dtype )
image_embeddings_clip = self.clip_model.get_image_features(image )
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
loss = spherical_dist_loss(image_embeddings_clip , text_embeddings_clip ).mean() * clip_guidance_scale
grads = -torch.autograd.grad(loss , latents )[0]
if isinstance(self.scheduler , LMSDiscreteScheduler ):
latents = latents.detach() + grads * (sigma**2)
noise_pred = noise_pred_original
else:
noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self , style_image , content_image , style_prompt = None , content_prompt = None , height = 512 , width = 512 , noise_strength = 0.6 , num_inference_steps = 50 , guidance_scale = 7.5 , batch_size = 1 , eta = 0.0 , clip_guidance_scale = 100 , generator = None , output_type = "pil" , return_dict = True , slerp_latent_style_strength = 0.8 , slerp_prompt_style_strength = 0.1 , slerp_clip_image_style_strength = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(generator )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(generator , torch.Generator ) and batch_size > 1:
generator = [generator] + [None] * (batch_size - 1)
coca_is_none = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
coca_is_none = [x[0] for x in coca_is_none if x[1]]
coca_is_none_str = ''', '''.join(coca_is_none )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(coca_is_none ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
content_prompt = self.get_image_description(content_image )
if style_prompt is None:
if len(coca_is_none ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
style_prompt = self.get_image_description(style_image )
# get prompt text embeddings for content and style
content_text_input = self.tokenizer(
content_prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
style_text_input = self.tokenizer(
style_prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
text_embeddings = slerp(slerp_prompt_style_strength , content_text_embeddings , style_text_embeddings )
# duplicate text embeddings for each generation per prompt
text_embeddings = text_embeddings.repeat_interleave(batch_size , dim=0 )
# set timesteps
accepts_offset = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
extra_set_kwargs = {}
if accepts_offset:
extra_set_kwargs['''offset'''] = 1
self.scheduler.set_timesteps(num_inference_steps , **extra_set_kwargs )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , noise_strength , self.device )
latent_timestep = timesteps[:1].repeat(batch_size )
# Preprocess image
preprocessed_content_image = preprocess(content_image , width , height )
content_latents = self.prepare_latents(
preprocessed_content_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
preprocessed_style_image = preprocess(style_image , width , height )
style_latents = self.prepare_latents(
preprocessed_style_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
latents = slerp(slerp_latent_style_strength , content_latents , style_latents )
if clip_guidance_scale > 0:
content_clip_image_embedding = self.get_clip_image_embeddings(content_image , batch_size )
style_clip_image_embedding = self.get_clip_image_embeddings(style_image , batch_size )
clip_image_embeddings = slerp(
slerp_clip_image_style_strength , content_clip_image_embedding , style_clip_image_embedding )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
max_length = content_text_input.input_ids.shape[-1]
uncond_input = self.tokenizer([''''''] , padding='''max_length''' , max_length=max_length , return_tensors='''pt''' )
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
latents = torch.randn(latents_shape , generator=generator , device='''cpu''' , dtype=latents_dtype ).to(
self.device )
else:
latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
latents = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs['''eta'''] = eta
# check if the scheduler accepts generator
accepts_generator = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
extra_step_kwargs['''generator'''] = generator
with self.progress_bar(total=num_inference_steps ):
for i, t in enumerate(timesteps ):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
# predict the noise residual
noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
text_embeddings_for_guidance = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
noise_pred , latents = self.cond_fn(
latents , t , i , text_embeddings_for_guidance , noise_pred , clip_image_embeddings , clip_guidance_scale , )
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents ).sample
image = (image / 2 + 0.5).clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
| 11 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 393 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = CTRLTokenizer
test_rust_tokenizer = False
test_seq2seq = False
def setUp(self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
self.special_tokens_map = {'''unk_token''': '''<unk>'''}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(merges ) )
def get_tokenizer(self , **kwargs ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts(self , tokenizer ) -> Tuple:
"""simple docstring"""
input_text = '''adapt react readapt apt'''
output_text = '''adapt react readapt apt'''
return input_text, output_text
def test_full_tokenizer(self ) -> List[Any]:
"""simple docstring"""
tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = '''adapt react readapt apt'''
bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 11 | 0 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
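# Illustration (not part of the original test module): on Python >= 3.10 the two
# annotations below are equivalent; the flag above gates the PEP 604 test variants
# because `int | None` is a syntax error on older interpreters.
#   foo: Optional[int] = None   # works on all supported versions
#   foo: int | None = None      # requires Python >= 3.10 (PEP 604)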
def list_field(default=None , metadata=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=metadata )
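# `list_field` wraps dataclasses.field(default_factory=...) so the dataclasses
# below can declare mutable list defaults inline.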
@dataclass
class BasicExample:
'''simple docstring'''
foo: int
bar: float
baz: str
flag: bool
@dataclass
class WithDefaultExample:
'''simple docstring'''
foo: int = 4_2
baz: str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class WithDefaultBoolExample:
'''simple docstring'''
foo: bool = False
baz: bool = True
opt: Optional[bool] = None
class BasicEnum(Enum):
'''simple docstring'''
titi = 'titi'
toto = 'toto'
class MixedTypeEnum(Enum):
'''simple docstring'''
titi = 'titi'
toto = 'toto'
fourtytwo = 4_2
@dataclass
class EnumExample:
'''simple docstring'''
foo: BasicEnum = "toto"
def __post_init__(self ):
"""simple docstring"""
self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample:
'''simple docstring'''
foo: MixedTypeEnum = "toto"
def __post_init__(self ):
"""simple docstring"""
self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
'''simple docstring'''
foo: Optional[int] = None
bar: Optional[float] = field(default=None , metadata={'help': 'help message'} )
baz: Optional[str] = None
ces: Optional[List[str]] = list_field(default=[] )
des: Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample:
'''simple docstring'''
foo_int: List[int] = list_field(default=[] )
bar_int: List[int] = list_field(default=[1, 2, 3] )
foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample:
'''simple docstring'''
required_list: List[int] = field()
required_str: str = field()
required_enum: BasicEnum = field()
def __post_init__(self ):
"""simple docstring"""
self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample:
'''simple docstring'''
foo: int
required_enum: "BasicEnum" = field()
opt: "Optional[bool]" = None
baz: "str" = field(default='toto' , metadata={'help': 'help message'} )
foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class WithDefaultBoolExamplePep604:
'''simple docstring'''
foo: bool = False
baz: bool = True
opt: bool | None = None
@dataclass
class OptionalExamplePep604:
'''simple docstring'''
foo: int | None = None
bar: float | None = field(default=None , metadata={'help': 'help message'} )
baz: str | None = None
ces: list[str] | None = list_field(default=[] )
des: list[int] | None = list_field(default=[] )
class HfArgumentParserTest( unittest.TestCase ):
'''simple docstring'''
def argparsersEqual(self , a , b ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
xx = {k: v for k, v in vars(x ).items() if k != """container"""}
yy = {k: v for k, v in vars(y ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , None ) and yy.get("""choices""" , None ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](expected_choice ) , yy["""type"""](expected_choice ) )
del xx["type"], yy["type"]
self.assertEqual(xx , yy )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument("""--bar""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument("""--baz""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument("""--flag""" , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs="""?""" )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Dict = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((UpperCAmelCase__ ) , ) : Union[str, Any] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase )
self.assertFalse(example.flag )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : str = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=_lowerCamelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=_lowerCamelCase , help="""help message""" )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs="""?""" )
expected.add_argument("""--baz""" , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=_lowerCamelCase , dest="""baz""" )
expected.add_argument("""--opt""" , type=_lowerCamelCase , default=_lowerCamelCase )
dataclass_types = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(WithDefaultBoolExamplePep604 )
for dataclass_type in dataclass_types:
UpperCAmelCase__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
UpperCAmelCase__ : Tuple = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
UpperCAmelCase__ : Any = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
UpperCAmelCase__ : List[Any] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
UpperCAmelCase__ : Any = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Any = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
UpperCAmelCase__ : Tuple = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCAmelCase__ : int = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
UpperCAmelCase__ : List[str] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCAmelCase__ : Optional[int] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
UpperCAmelCase__ : str = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a (self ):
"""simple docstring"""
@dataclass
class lowerCamelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = "toto"
UpperCAmelCase__ : Any = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
UpperCAmelCase__ : List[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
UpperCAmelCase__ : Union[str, Any] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : str = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=_lowerCamelCase )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=_lowerCamelCase )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_lowerCamelCase )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : List[Any] = parser.parse_args([] )
self.assertEqual(
_lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCAmelCase__ : Any = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument("""--bar""" , default=_lowerCamelCase , type=_lowerCamelCase , help="""help message""" )
expected.add_argument("""--baz""" , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=_lowerCamelCase )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=_lowerCamelCase )
dataclass_types = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(OptionalExamplePep604 )
for dataclass_type in dataclass_types:
UpperCAmelCase__ : Any = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) )
UpperCAmelCase__ : Tuple = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Any = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : int = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument("""--required_str""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_lowerCamelCase , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_lowerCamelCase , )
expected.add_argument("""--opt""" , type=_lowerCamelCase , default=_lowerCamelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=_lowerCamelCase , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : Any = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
UpperCAmelCase__ : Optional[Any] = parser.parse_dict(_lowerCamelCase )[0]
UpperCAmelCase__ : str = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : str = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , """temp_json""" )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Any = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
UpperCAmelCase__ : Tuple = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Any = os.path.join(_lowerCamelCase , """temp_yaml""" )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
UpperCAmelCase__ : Union[str, Any] = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
| 182 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
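# `rename_keys` below rewrites flattened T5X parameter paths (e.g. `encoder/layers_3/...`)
# into the `block/<n>/layer/<m>/...` layout the PyTorch SwitchTransformers model expects,
# then applies the string substitutions from MOE_LAYER_NAME_MAPPING.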
def rename_keys(s_dict):
"""simple docstring"""
keys = list(s_dict.keys())
for key in keys:
layer_to_block_of_layer = r'''.*/layers_(\d+)'''
new_key = key
if re.match(layer_to_block_of_layer , new_key):
new_key = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , new_key)
layer_to_block_of_layer = r'''(encoder|decoder)\/'''
if re.match(layer_to_block_of_layer , new_key):
groups = re.match(layer_to_block_of_layer , new_key).groups()
if groups[0] == "encoder":
new_key = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , new_key)
new_key = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , new_key)
elif groups[0] == "decoder":
new_key = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , new_key)
new_key = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , new_key)
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
new_key = new_key.replace(old_key , temp_key)
print(F'''{key} -> {new_key}''')
s_dict[new_key] = s_dict.pop(key)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
s_dict['''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
s_dict['''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys()):
if "expert" in key:
num_experts = s_dict[key].shape[0]
expert_weights = s_dict[key]
for idx in range(num_experts):
s_dict[key.replace('''expert/''' , f'''experts/expert_{idx}/''')] = expert_weights[idx]
print(F'''{key} -> {key.replace('expert/' , f'experts/expert_{idx}/')}''')
s_dict.pop(key)
return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
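# `convert_gin_to_config` below scrapes `param = value` pairs out of a raw .gin file
# with a regex and maps them onto SwitchTransformersConfig fields via GIN_TO_CONFIG_MAPPING.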
def convert_gin_to_config(gin_file , num_experts):
"""simple docstring"""
import regex as re
with open(gin_file , '''r''') as f:
raw_gin = f.read()
regex_match = re.findall(r'''(.*) = ([0-9.]*)''' , raw_gin)
args = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '''.''' in value else int(value)
activation = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , raw_gin)[0]
args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
args['''num_experts'''] = num_experts
config = SwitchTransformersConfig(**args)
return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8):
"""simple docstring"""
print(F'''Loading flax weights from : {flax_checkpoint_path}''')
flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
if gin_file is not None:
config = convert_gin_to_config(gin_file , num_experts)
else:
config = SwitchTransformersConfig.from_pretrained(config_file)
pt_model = SwitchTransformersForConditionalGeneration(config)
flax_params = flax_params['''target''']
flax_params = flatten_dict(flax_params , sep='''/''')
flax_params = rename_keys(flax_params)
flax_params = unflatten_dict(flax_params , sep='''/''')
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(pt_model , flax_params)
print(F'''Save PyTorch model to {pytorch_dump_path}''')
pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 11 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def check_results_dict_not_empty( self : Union[str, Any] , results : Dict ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__lowerCAmelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> str:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase ( self : Tuple ) -> Dict:
__lowerCAmelCase = 'sgugger/tiny-distilbert-classification'
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , only_pretrain_model=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , torchscript=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , fpaa=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase ( self : Any ) -> int:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
# set architectures equal to `None`
__lowerCAmelCase = None
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def lowercase ( self : Optional[Any] ) -> Optional[int]:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowerCAmelCase_ , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
__lowerCAmelCase = 'sshleifer/tinier_bart'
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase ( self : str ) -> Any:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase ( self : Any ) -> Dict:
__lowerCAmelCase = 'sshleifer/tinier_bart'
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase ( self : int ) -> int:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , save_to_csv=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCAmelCase_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowerCAmelCase_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowerCAmelCase_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowerCAmelCase_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowerCAmelCase_ , 'env.csv' ) , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , 'env.csv' ) ).exists() )
def lowercase ( self : Dict ) -> str:
__lowerCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowerCAmelCase_ : int ):
self.assertTrue(hasattr(lowerCAmelCase_ , 'sequential' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'cumulative' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'current' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCAmelCase_ , 'log.txt' ) , log_print=lowerCAmelCase_ , trace_memory_line_by_line=lowerCAmelCase_ , multi_process=lowerCAmelCase_ , )
__lowerCAmelCase = PyTorchBenchmark(lowerCAmelCase_ )
__lowerCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , 'log.txt' ) ).exists() )
| 53 |
'''simple docstring'''
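# Return the fractional part of `number`; when `digit_amount` > 0 the result is
# rounded to that many decimal places, otherwise full float precision is kept.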
def decimal_isolate(number , digit_amount):
"""simple docstring"""
if digit_amount > 0:
return round(number - int(number) , digit_amount)
return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 11 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest( BertTokenizationTest ):
tokenizer_class = DistilBertTokenizer
rust_tokenizer_class = DistilBertTokenizerFast
test_rust_tokenizer = True
@slow
def test_sequence_builders( self : Tuple ) -> str:
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
text = tokenizer.encode('sequence builders' , add_special_tokens=False )
text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 570 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
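# A MinHash signature approximates the Jaccard similarity between token sets:
# two files whose signatures collide in enough LSH bands are likely near-duplicates.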
def get_min_hash(tokens):
"""simple docstring"""
if len(tokens) < MIN_NUM_TOKENS:
return None
min_hash = MinHash(num_perm=NUM_PERM)
for token in set(tokens):
min_hash.update(token.encode())
return min_hash
def get_tokens(code):
"""simple docstring"""
return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
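# Streaming near-duplicate detector: an LSH index over MinHash signatures that
# groups files whose estimated Jaccard similarity exceeds the configured threshold.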
class DuplicationIndex:
'''simple docstring'''
def __init__(self , *,
duplication_jaccard_threshold = 0.85 , ) -> Optional[int]:
"""simple docstring"""
self._duplication_jaccard_threshold = duplication_jaccard_threshold
self._num_perm = NUM_PERM
self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
self._duplicate_clusters = defaultdict(set )
def add(self , code_key , min_hash ) -> None:
"""simple docstring"""
close_duplicates = self._index.query(min_hash )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(code_key , min_hash )
if len(close_duplicates ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(code_key )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(code_key )
def get_duplicate_clusters(self ) -> List[List[Dict]]:
"""simple docstring"""
duplicate_clusters = []
for base, duplicates in self._duplicate_clusters.items():
cluster = [base] + list(duplicates )
# reformat the cluster to be a list of dict
cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(cluster )
return duplicate_clusters
def save(self , filepath ) -> None:
"""simple docstring"""
duplicate_clusters = self.get_duplicate_clusters()
with open(filepath , '''w''' ) as f:
json.dump(duplicate_clusters , f )
def _compute_min_hash(element):
"""simple docstring"""
index , data = element
min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10_000) , chunksize=100 , ):
if data is not None:
yield data
def make_duplicate_clusters(dataset_iterator , jaccard_threshold):
"""simple docstring"""
di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)) , max_queue_size=100)):
di.add(filename , min_hash)
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity(code1 , code2):
"""simple docstring"""
tokens1 = get_tokens(code1)
tokens2 = get_tokens(code2)
return len(tokens1 & tokens2) / len(tokens1 | tokens2)
lowercase_ = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold):
"""simple docstring"""
extremes = []
for element1 in cluster:
code1 = _shared_dataset[element1['''base_index''']]['''content''']
for element2 in extremes:
code2 = _shared_dataset[element2['''base_index''']]['''content''']
if jaccard_similarity(code1 , code2) >= jaccard_threshold:
element2["copies"] += 1
break
else:
element1["copies"] = 1
extremes.append(element1)
return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold):
"""simple docstring"""
global _shared_dataset
_shared_dataset = dataset
extremes_list = []
f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold)
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
f , cluster_list , ) , total=len(cluster_list) , ):
extremes_list.append(extremes)
return extremes_list
def deduplicate_dataset(dataset , jaccard_threshold = 0.85):
"""simple docstring"""
duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold)
duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
extreme_dict = {}
extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold)
for extremes in extremes_clusters:
for element in extremes:
extreme_dict[element['''base_index''']] = element
remove_indices = duplicate_indices - set(extreme_dict.keys())
ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True)
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
element['''is_extreme'''] = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']
print(F'''Original dataset size: {len(dataset)}''')
print(F'''Number of duplicate clusters: {len(duplicate_clusters)}''')
print(F'''Files in duplicate cluster: {len(duplicate_indices)}''')
print(F'''Unique files in duplicate cluster: {len(extreme_dict)}''')
print(F'''Filtered dataset size: {len(ds_filter)}''')
return ds_filter, duplicate_clusters
| 11 | 0 |
import pprint
import requests
_snake_case = "https://zenquotes.io/api"
def quote_of_the_day( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def random_quotes( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
response = random_quotes()
pprint.pprint(response)
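# Example (illustrative; field names follow the zenquotes API response format,
# e.g. "q" for the quote text and "a" for the author):
# print(random_quotes()[0]["q"])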
| 500 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
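# Hedged usage sketch, not part of the test file: the same CPU-offload mechanics the
# tests above verify, applied directly. The execution device falls back to CPU when
# no GPU is present, mirroring the tests.
#
#     model = ModelForTest()
#     attach_align_device_hook(model, execution_device=0 if torch.cuda.is_available() else "cpu", offload=True)
#     output = model(torch.randn(2, 3))  # weights are streamed in from the CPU copy on each forward
#     remove_hook_from_submodules(model)  # restores the real weights onto the module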
| 11 | 0 |
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse of ``a`` modulo ``m`` via the extended Euclidean
    algorithm; raises ValueError when ``a`` and ``m`` are not coprime."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
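# Hedged usage examples, not part of the original file:
#
#     >>> mod_inverse(3, 11)   # 3 * 4 = 12, and 12 % 11 == 1
#     4
#     >>> (7 * mod_inverse(7, 26)) % 26
#     1
#     >>> mod_inverse(4, 8)    # gcd(4, 8) != 1, so no inverse exists
#     Traceback (most recent call last):
#         ...
#     ValueError: mod inverse of 4 and 8 does not exist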
| 36 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 11 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
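# Hedged standalone sketch, not part of the test file: sampling from the same
# pretrained checkpoint the slow test exercises. PIL output handling is an assumption.
#
#     import torch
#     from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#     pipe.to("cuda" if torch.cuda.is_available() else "cpu")
#     image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]
#     image.save("karras_ve_sample.png")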
| 593 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
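# Hedged helper sketch, not part of the original file: in the gym-style evaluation
# loop this test mimics, returns-to-go are the reversed cumulative sum of future
# rewards. A one-liner for reference:
#
#     import torch
#
#     def compute_returns_to_go(rewards: torch.Tensor) -> torch.Tensor:
#         # rewards: shape (T,) -> rtg[t] = sum(rewards[t:])
#         return torch.flip(torch.cumsum(torch.flip(rewards, dims=[0]), dim=0), dims=[0])
#
#     assert torch.equal(compute_returns_to_go(torch.tensor([1.0, 2.0, 3.0])), torch.tensor([6.0, 5.0, 3.0]))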
| 11 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 156 |
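# Hedged usage sketch, not part of the original file: a WTQ-style fine-tuning config.
# The label map follows the TAPAS paper's aggregation operators plus NONE.
#
#     from transformers import TapasConfig
#
#     config = TapasConfig(
#         num_aggregation_labels=4,
#         use_answer_as_supervision=True,
#         aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
#         no_aggregation_label_index=0,
#     )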
'''simple docstring'''
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True when every element of the collection is distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique(["a", "b", "a"])
    False
    """
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq checkpoint and rename its keys to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the model's weights to our specification."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 261 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively search a sorted list; return True when ``item`` is present."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
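# Hedged note and sketch, not part of the original file: the recursive version above
# assumes `sequence` is sorted ascending, and each slice copies part of the list. An
# equivalent iterative version avoids both the copies and Python's recursion limit:
#
#     def binary_search_iterative(a_list: list[int], item: int) -> bool:
#         low, high = 0, len(a_list) - 1
#         while low <= high:
#             midpoint = (low + high) // 2
#             if a_list[midpoint] == item:
#                 return True
#             if item < a_list[midpoint]:
#                 high = midpoint - 1
#             else:
#                 low = midpoint + 1
#         return False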
| 11 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 582 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of the elements in the inclusive index range [start, end]."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True when some contiguous slice of the array sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
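# Hedged usage examples, not part of the original file:
#
#     >>> ps = PrefixSum([1, 2, 3, 4])
#     >>> ps.get_sum(0, 2)    # 1 + 2 + 3
#     6
#     >>> ps.get_sum(1, 3)    # 2 + 3 + 4
#     9
#     >>> ps.contains_sum(7)  # the slice [3, 4] sums to 7
#     True
#     >>> ps.contains_sum(100)
#     False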
| 11 | 0 |
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer norm parameter of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Convert the parameters from a T5X-Flax checkpoint into the PyTorch key layout."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepare the model state dict."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replace the model's state dict with the converted T5X parameters."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Load the T5X checkpoint, convert it and save it as a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
lowerCAmelCase__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 514 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order.

    >>> prime_factors(12)
    [2, 2, 3]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
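# Hedged usage note, not part of the original file: multiplying the factors back
# together reconstructs the input, which makes a handy property check.
#
#     import math
#     assert math.prod(prime_factors(360)) == 360   # [2, 2, 2, 3, 3, 5]
#     assert prime_factors(1) == []                 # 1 has no prime factors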
| 11 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the generated video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 393 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """One party of a Diffie-Hellman key exchange over an RFC 3526 MODP group."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        # Assumes sha256, hexlify and urandom are imported at the top of this
        # module (from hashlib, binascii and os respectively).
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # A key is valid iff 2 <= key <= p - 2 and key^((p - 1) / 2) % p == 1
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # Same validity check as above, usable without an instance
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
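    # A minimal round-trip sketch (variable names are illustrative): both parties
    # must derive the same shared key.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )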
| 11 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
return 1 if input_a == input_a else 0
def a__ ( ) -> List[str]:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
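    # Expected output, i.e. the XNOR truth table:
    #   a b | a XNOR b
    #   0 0 |    1
    #   0 1 |    0
    #   1 0 |    0
    #   1 1 |    1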
| 182 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
lowercase_ = logging.getLogger(__name__)
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if os.path.exists(__A):
if os.path.exists(os.path.join(__A , '''config.json''')) and os.path.isfile(
os.path.join(__A , '''config.json''')):
os.remove(os.path.join(__A , '''config.json'''))
if os.path.exists(os.path.join(__A , '''pytorch_model.bin''')) and os.path.isfile(
os.path.join(__A , '''pytorch_model.bin''')):
os.remove(os.path.join(__A , '''pytorch_model.bin'''))
else:
os.makedirs(__A)
model.save_pretrained(__A)
def lowerCAmelCase (__A , __A=False):
"""simple docstring"""
_a = 2
if unlogit:
_a = torch.pow(__A , __A)
_a = p * torch.log(__A)
_a = 0
return -plogp.sum(dim=-1)
def lowerCAmelCase (__A):
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(F'''{x + 1}''' for x in range(len(__A))))
for row in range(len(__A)):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:.5f}''' for x in tensor[row].cpu().data))
else:
logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:d}''' for x in tensor[row].cpu().data))
def lowerCAmelCase (__A , __A , __A , __A=True , __A=True , __A=None , __A=False):
"""simple docstring"""
_a , _a = model.config.num_hidden_layers, model.config.num_attention_heads
_a = torch.zeros(__A , __A).to(args.device)
_a = torch.zeros(__A , __A).to(args.device)
if head_mask is None:
_a = torch.ones(__A , __A).to(args.device)
head_mask.requires_grad_(requires_grad=__A)
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_a = None
_a = 0.0
_a = 0.0
for step, inputs in enumerate(tqdm(__A , desc='''Iteration''' , disable=args.local_rank not in [-1, 0])):
_a = tuple(t.to(args.device) for t in inputs)
((_a) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_a = model(__A , labels=__A , head_mask=__A)
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_a , _a , _a = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__A):
_a = entropy(attn.detach() , __A)
attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__A).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_a = 2
_a = torch.pow(torch.pow(__A , __A).sum(-1) , 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
if not args.dont_normalize_global_importance:
_a = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''')
print_ad_tensor(__A)
if compute_importance:
logger.info('''Head importance scores''')
print_ad_tensor(__A)
logger.info('''Head ranked by importance scores''')
_a = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device)
_a = torch.arange(
head_importance.numel() , device=args.device)
_a = head_ranks.view_as(__A)
print_ad_tensor(__A)
return attn_entropy, head_importance, total_loss
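# The head importance accumulated above is |dL/d(head_mask_h)| summed over batches: a
# first-order estimate of how much the loss would grow if head h were zeroed out.
# A minimal self-contained sketch of the same trick (all names below are illustrative):
#
#     head_mask = torch.ones(n_layers, n_heads, requires_grad=True)
#     loss = model(input_ids, labels=input_ids, head_mask=head_mask)[0]
#     loss.backward()
#     importance = head_mask.grad.abs()  # large gradient => pruning this head hurts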
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
_a , _a , _a = compute_heads_importance(__A , __A , __A , compute_entropy=__A)
    _a = 1 / loss  # instead of downstream score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , __A , original_score * args.masking_threshold)
_a = torch.ones_like(__A)
_a = max(1 , int(new_head_mask.numel() * args.masking_amount))
_a = original_score
while current_score >= original_score * args.masking_threshold:
_a = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_a = float('''Inf''')
_a = head_importance.view(-1).sort()[1]
if len(__A) <= num_to_mask:
print('''BREAK BY num_to_mask''')
break
# mask heads
_a = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist()))
_a = new_head_mask.view(-1)
_a = 0.0
_a = new_head_mask.view_as(__A)
_a = new_head_mask.clone().detach()
print_ad_tensor(__A)
# Compute metric and head importance again
_a , _a , _a = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , head_mask=__A)
_a = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''')
print_ad_tensor(__A)
np.save(os.path.join(args.output_dir , '''head_mask.npy''') , head_mask.detach().cpu().numpy())
return head_mask
def lowerCAmelCase (__A , __A , __A , __A):
"""simple docstring"""
_a = datetime.now()
_a , _a , _a = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A)
_a = 1 / loss
_a = datetime.now() - before_time
_a = sum(p.numel() for p in model.parameters())
_a = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__A))
}
for k, v in heads_to_prune.items():
if isinstance(__A , __A):
_a = [
v,
]
assert sum(len(__A) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
model.prune_heads(__A)
_a = sum(p.numel() for p in model.parameters())
_a = datetime.now()
_a , _a , _a = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A , actually_pruned=__A , )
_a = 1 / loss
_a = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __A , __A , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __A , __A)
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100)
save_model(__A , args.output_dir)
def lowerCAmelCase ():
"""simple docstring"""
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__A , type=__A , required=__A , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__A , type=__A , required=__A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__A , type=__A , required=__A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__A , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__A , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__A , type=__A , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__A , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''')
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''')
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''')
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''')
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=__A , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=__A , help='''Amount to heads to masking at each masking step.''')
parser.add_argument('''--metric_name''' , default='''acc''' , type=__A , help='''Metric to use for head masking.''')
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__A , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__A , help='''Batch size.''')
parser.add_argument('''--seed''' , type=__A , default=42)
parser.add_argument('''--local_rank''' , type=__A , default=-1 , help='''local_rank for distributed training on gpus''')
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''')
parser.add_argument('''--server_ip''' , type=__A , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=__A , default='''''' , help='''Can be used for distant debugging.''')
_a = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__A)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_a = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''')
_a = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
_a = torch.device('''cuda''' , args.local_rank)
_a = 1
torch.distributed.init_process_group(backend='''nccl''') # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1)))
    _a = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device)
if args.local_rank != -1:
_a = nn.parallel.DistributedDataParallel(
__A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__A)
elif args.n_gpu > 1:
_a = nn.DataParallel(__A)
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__A)
torch.save(__A , os.path.join(args.output_dir , '''run_args.bin'''))
logger.info('''Training/evaluation parameters %s''' , __A)
# Prepare dataset
_a = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64),
])
_a = (torch.from_numpy(__A),)
_a = TensorDataset(*__A)
_a = RandomSampler(__A)
_a = DataLoader(__A , sampler=__A , batch_size=args.batch_size)
# Compute head entropy and importance score
compute_heads_importance(__A , __A , __A)
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_a = mask_heads(__A , __A , __A)
prune_heads(__A , __A , __A , __A)
if __name__ == "__main__":
main()
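# Typical invocation (paths are placeholders; --data_dir expects a plain text file of
# token ids, and head_mask.npy plus the pruned model are written to --output_dir):
#
#   python prune_gpt.py --model_name_or_path gpt2 --data_dir data/tokens.txt \
#       --output_dir out/ --try_masking --masking_threshold 0.9 --masking_amount 0.1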
| 11 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers with their names blanked out, then restore the names
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from an ONNX model to reduce its size."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Estimate the bytes saved from the tensor's dtype and element count
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
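# Usage sketch (the path below is a placeholder for an exported ONNX file on disk):
#
#     optimized_path = remove_dup_initializers("export/encoder.onnx")
#     print("deduplicated model written to", optimized_path)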
| 53 |
"""Multiplicative and additive persistence of an integer."""


def multiplicative_persistence(num: int) -> int:
    """
    Count how many times the digits of num must be multiplied together
    before a single digit remains.
    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Count how many times the digits of num must be summed
    before a single digit remains.
    >>> additive_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
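    # Worked example: 39 -> 27 -> 14 -> 4 takes three multiplications, while the
    # digit sums 39 -> 12 -> 3 take two additions.
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(39) == 2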
| 11 | 0 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word is stored in the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
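    # A tiny ad-hoc demo mirroring what test_trie() already checks:
    demo_root = TrieNode()
    demo_root.insert_many(["band", "bandana"])
    assert demo_root.find("band") and not demo_root.find("ban")  # "ban" is only a prefix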
| 570 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def a__ (self ) -> Dict:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
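# The three image-input tests above check one contract: whatever the container (PIL,
# NumPy or torch), return_tensors="pt" must yield pixel_values of shape
# (batch, channels, height, width). A minimal sketch of that call:
#
#     processor = DPTImageProcessor(size={"height": 18, "width": 18})
#     pixel_values = processor(images, return_tensors="pt").pixel_values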
| 11 | 0 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon search results page for the given product and return
    the listings as a Pandas dataframe."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out MRP and Discount where the scraped price exceeds the scraped MRP
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "MRP of the product",
    ] = " "
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "Discount",
    ] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
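    # The same helper works for any query string, e.g.:
    #     get_amazon_product_data("mechanical keyboard").head()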
| 500 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    """Builds configs and synthetic batches for the Autoformer tests below."""
def __init__(self , A , A=16 , A=13 , A=7 , A=14 , A=10 , A=19 , A=5 , A=4 , A=True , A=16 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=[1, 2, 3, 4, 5] , A=25 , A=5 , ) -> List[str]:
"""simple docstring"""
_a = d_model
_a = parent
_a = batch_size
_a = prediction_length
_a = context_length
_a = cardinality
_a = num_time_features
_a = lags_sequence
_a = embedding_dimension
_a = is_training
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = context_length
_a = prediction_length + label_length
_a = label_length
_a = moving_average
_a = autocorrelation_factor
def a__ (self ) -> Any:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
_a = config.context_length + max(config.lags_sequence )
_a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, _past_length] )
_a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, config.prediction_length] )
_a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.get_config()
_a = self.prepare_autoformer_inputs_dict(A )
return config, inputs_dict
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ (self , A , A ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModel(config=A ).to(A ).eval()
_a = model(**A )
_a = outputs.encoder_last_hidden_state
_a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_encoder()
encoder.save_pretrained(A )
_a = AutoformerEncoder.from_pretrained(A ).to(A )
_a , _a , _a , _a , _a = model.create_network_inputs(**A )
_a , _a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_a = encoder(inputs_embeds=A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_decoder()
decoder.save_pretrained(A )
_a = AutoformerDecoder.from_pretrained(A ).to(A )
_a = decoder(
trend=A , inputs_embeds=A , encoder_hidden_states=A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    # Flag names below follow the standard ModelTesterMixin switches; all were disabled here.
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModelTester(self )
_a = ConfigTester(self , config_class=A , has_text_modality=A )
def a__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a = model_class(A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
_a , _a = model_class.from_pretrained(A , output_loading_info=A )
self.assertEqual(info['''missing_keys'''] , [] )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = inspect.signature(getattr(A , '''forward''' ) )
# The main input is the name of the argument after `self`
_a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(A )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(A )] , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , '''seq_length''' , A )
_a = getattr(self.model_tester , '''decoder_seq_length''' , A )
_a = getattr(self.model_tester , '''encoder_seq_length''' , A )
_a = getattr(self.model_tester , '''d_model''' , A )
_a = getattr(self.model_tester , '''num_attention_heads''' , A )
_a = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_a = len(A )
_a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(A , A )
# decoder attentions
_a = outputs.decoder_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_a = outputs.cross_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 2 , len(A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase (__A="train-batch.pt"):
"""simple docstring"""
_a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__A , repo_type='''dataset''')
_a = torch.load(__A , map_location=__A)
return batch
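# The serialized batches fetched above are plain dicts of tensors; the keys consumed by
# the tests below are past_values, past_time_features, past_observed_mask,
# static_categorical_features, future_values and future_time_features.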
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch()
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
_a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
_a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
_a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A )
_a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=A )
_a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A , rtol=1E-1 ) )
| 11 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ['''gpt2''']
TINY_MODEL_CHECKPOINT = '''gpt2'''
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="""text"""),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["""input_ids"""].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
def snake_case_ ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
snake_case : List[str] = tokenizer([test_inputs] ,return_tensors="""tf""" )
snake_case : List[Any] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
snake_case : Union[str, Any] = python_outputs[key].numpy()
snake_case : Union[str, Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(SCREAMING_SNAKE_CASE_ ,tf.intaa ) == tf_outputs_values ) )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : List[Any] = tf.function(SCREAMING_SNAKE_CASE_ )
for test_inputs in self.test_sentences:
snake_case : List[str] = tf.constant(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = compiled_tokenizer(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = tf_tokenizer(SCREAMING_SNAKE_CASE_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : Any = ModelToSave(tokenizer=SCREAMING_SNAKE_CASE_ )
snake_case : str = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : List[Any] = model.serving(SCREAMING_SNAKE_CASE_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
snake_case : Any = Path(SCREAMING_SNAKE_CASE_ ) / """saved.model"""
tf.saved_model.save(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,signatures={"""serving_default""": model.serving} )
snake_case : int = tf.saved_model.load(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = loaded_model.signatures["""serving_default"""](SCREAMING_SNAKE_CASE_ )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : List[Any] = tf_tokenizer(SCREAMING_SNAKE_CASE_ ) # Build model with some sample inputs
snake_case : str = tf_tokenizer.get_config()
            snake_case : Optional[Any] = TFGPT2Tokenizer.from_config(SCREAMING_SNAKE_CASE_ )
snake_case : int = model_from_config(SCREAMING_SNAKE_CASE_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
snake_case : Optional[int] = 123123
for max_length in [3, 5, 1024]:
snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : Optional[int] = tf_tokenizer(SCREAMING_SNAKE_CASE_ ,max_length=SCREAMING_SNAKE_CASE_ )
snake_case : str = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
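# What the SavedModel test above demonstrates in practice: tokenization runs inside the
# TF graph, so a deployed model needs no Python-side preprocessing. Minimal sketch:
#
#     tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#     input_ids = tf_tokenizer(tf.constant(["hello world"]))["input_ids"]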
| 36 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """Builds configs and synthetic inputs for the OpenLlama tests below."""
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = OpenLlamaModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Any:
"""simple docstring"""
_a = True
_a = OpenLlamaModel(A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , )
_a = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = True
_a = True
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Standard ModelTesterMixin switches, both disabled here.
    test_headmasking = False
    test_pruning = False
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaModelTester(self )
_a = ConfigTester(self , config_class=A , hidden_size=37 )
def a__ (self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''single_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''multi_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__ (self , A ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 10] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
_a = original_model(A ).last_hidden_state
_a = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = {'''type''': scaling_type, '''factor''': 10.0}
_a = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
_a = scaled_model(A ).last_hidden_state
_a = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
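        # Recap of the two parameterized cases: "linear" RoPE scaling rescales position
        # ids by 1/factor immediately, so even short inputs diverge from the unscaled
        # model; "dynamic" (NTK-style) scaling only kicks in past the original
        # max_position_embeddings, so short inputs must match and long inputs must not.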
| 11 | 0 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_= """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
UpperCAmelCase_= """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
UpperCAmelCase_= """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
UpperCAmelCase_= """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task="""fill-mask""" , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
UpperCAmelCase_= [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
UpperCAmelCase_= self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase_= """1"""
UpperCAmelCase_= subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = """hf-internal-testing/tiny-random-bert"""
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task="""fill-mask""" , model=mname )
        # should succeed from the cache even though the network is mocked out
        cmd = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=True , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
        run = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, """-c""", """\n""".join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=True , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["""TRANSFORMERS_OFFLINE"""] = """1"""
        result = subprocess.run(cmd , env=env , check=True , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["""TRANSFORMERS_OFFLINE"""] = """1"""
        cmd = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            """You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
    def test_offline_mode_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, """-c""", """\n""".join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=True , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["""TRANSFORMERS_OFFLINE"""] = """1"""
        result = subprocess.run(cmd , env=env , check=True , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
| 593 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxAlbertModelTester(self )
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxAlbertModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference(self):
        """simple docstring"""
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 11 | 0 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask( masked_input , model , tokenizer , topk=5 ):
    """simple docstring"""
    assert masked_input.count("""<mask>""" ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = """ """.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
        predicted_token = predicted_token_bpe.replace("""\u2581""" , """ """ )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(""" {0}""".format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 156 |
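# The core of the fill_mask helper above, isolated: softmax over the vocabulary
# logits at the masked position, then top-k. Toy logits stand in for the model.
import torch

logits = torch.tensor([2.0, 0.5, -1.0, 3.0])
probs = logits.softmax(dim=0)
values, indices = probs.topk(k=2, dim=0)
print(indices.tolist(), [round(v, 3) for v in values.tolist()])  # index 3 has the largest logit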
'''simple docstring'''
def validate_initial_digits(credit_card_number):
    """simple docstring"""
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6'''))
def luhn_validation(credit_card_number):
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len , -1 , -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling results in a two-digit number, i.e. greater than 9
        # (e.g., 6 × 2 = 12), then add the digits of the product
        # (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1 , -1 , -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number):
    """simple docstring"""
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F'''{error_message} of its length.''')
        return False
    if not validate_initial_digits(credit_card_number):
        print(F'''{error_message} of its first two digits.''')
        return False
    if not luhn_validation(credit_card_number):
        print(F'''{error_message} it fails the Luhn check.''')
        return False
    print(F'''{credit_card_number} is a valid credit card number.''')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 11 | 0 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor']
    image_processor_class = 'SamImageProcessor'
def __init__( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
super().__init__(lowerCamelCase )
snake_case__ : Tuple = self.image_processor
snake_case__ : Optional[int] = -10
snake_case__ : int = self.image_processor.size['''longest_edge''']
def __call__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase = None , **lowerCamelCase , ) -> BatchEncoding:
"""simple docstring"""
snake_case__ : str = self.image_processor(
lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , )
# pop arguments that are not used in the foward but used nevertheless
snake_case__ : int = encoding_image_processor['''original_sizes''']
if hasattr(lowerCamelCase , '''numpy''' ): # Checks if Torch or TF tensor
snake_case__ : List[Any] = original_sizes.numpy()
snake_case__ ,snake_case__ ,snake_case__ : Optional[int] = self._check_and_preprocess_points(
input_points=lowerCamelCase , input_labels=lowerCamelCase , input_boxes=lowerCamelCase , )
snake_case__ : List[Any] = self._normalize_and_convert(
lowerCamelCase , lowerCamelCase , input_points=lowerCamelCase , input_labels=lowerCamelCase , input_boxes=lowerCamelCase , return_tensors=lowerCamelCase , )
return encoding_image_processor
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="pt" , ) -> Dict:
"""simple docstring"""
if input_points is not None:
if len(lowerCamelCase ) != len(lowerCamelCase ):
snake_case__ : Optional[int] = [
self._normalize_coordinates(self.target_size , lowerCamelCase , original_sizes[0] ) for point in input_points
]
else:
snake_case__ : Any = [
self._normalize_coordinates(self.target_size , lowerCamelCase , lowerCamelCase )
for point, original_size in zip(lowerCamelCase , lowerCamelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
snake_case__ ,snake_case__ : Any = self._pad_points_and_labels(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = np.array(lowerCamelCase )
if input_labels is not None:
snake_case__ : Optional[Any] = np.array(lowerCamelCase )
if input_boxes is not None:
if len(lowerCamelCase ) != len(lowerCamelCase ):
snake_case__ : Optional[int] = [
self._normalize_coordinates(self.target_size , lowerCamelCase , original_sizes[0] , is_bounding_box=lowerCamelCase )
for box in input_boxes
]
else:
snake_case__ : Optional[Any] = [
self._normalize_coordinates(self.target_size , lowerCamelCase , lowerCamelCase , is_bounding_box=lowerCamelCase )
for box, original_size in zip(lowerCamelCase , lowerCamelCase )
]
snake_case__ : List[str] = np.array(lowerCamelCase )
if input_boxes is not None:
if return_tensors == "pt":
snake_case__ : Dict = torch.from_numpy(lowerCamelCase )
# boxes batch size of 1 by default
snake_case__ : int = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
snake_case__ : List[str] = tf.convert_to_tensor(lowerCamelCase )
# boxes batch size of 1 by default
snake_case__ : Dict = tf.expand_dims(lowerCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
snake_case__ : int = torch.from_numpy(lowerCamelCase )
# point batch size of 1 by default
snake_case__ : str = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
snake_case__ : Dict = tf.convert_to_tensor(lowerCamelCase )
# point batch size of 1 by default
snake_case__ : Tuple = tf.expand_dims(lowerCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
snake_case__ : Union[str, Any] = torch.from_numpy(lowerCamelCase )
# point batch size of 1 by default
snake_case__ : List[str] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
snake_case__ : Union[str, Any] = tf.convert_to_tensor(lowerCamelCase )
# point batch size of 1 by default
snake_case__ : Dict = tf.expand_dims(lowerCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : Union[str, Any] = max([point.shape[0] for point in input_points] )
snake_case__ : List[str] = []
for i, point in enumerate(lowerCamelCase ):
if point.shape[0] != expected_nb_points:
snake_case__ : List[str] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
snake_case__ : Optional[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowerCamelCase )
snake_case__ : Optional[int] = processed_input_points
return input_points, input_labels
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) -> np.ndarray:
"""simple docstring"""
snake_case__ ,snake_case__ : List[Any] = original_size
snake_case__ ,snake_case__ : List[Any] = self.image_processor._get_preprocess_shape(lowerCamelCase , longest_edge=lowerCamelCase )
snake_case__ : Optional[int] = deepcopy(lowerCamelCase ).astype(lowerCamelCase )
if is_bounding_box:
snake_case__ : List[str] = coords.reshape(-1 , 2 , 2 )
snake_case__ : Dict = coords[..., 0] * (new_w / old_w)
snake_case__ : Union[str, Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
snake_case__ : Optional[int] = coords.reshape(-1 , 4 )
return coords
def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Dict:
"""simple docstring"""
if input_points is not None:
if hasattr(lowerCamelCase , '''numpy''' ): # Checks for TF or Torch tensor
snake_case__ : Optional[Any] = input_points.numpy().tolist()
if not isinstance(lowerCamelCase , lowerCamelCase ) or not isinstance(input_points[0] , lowerCamelCase ):
raise ValueError('''Input points must be a list of list of floating points.''' )
snake_case__ : List[Any] = [np.array(lowerCamelCase ) for input_point in input_points]
else:
snake_case__ : List[str] = None
if input_labels is not None:
if hasattr(lowerCamelCase , '''numpy''' ):
snake_case__ : int = input_labels.numpy().tolist()
if not isinstance(lowerCamelCase , lowerCamelCase ) or not isinstance(input_labels[0] , lowerCamelCase ):
raise ValueError('''Input labels must be a list of list integers.''' )
snake_case__ : Any = [np.array(lowerCamelCase ) for label in input_labels]
else:
snake_case__ : int = None
if input_boxes is not None:
if hasattr(lowerCamelCase , '''numpy''' ):
snake_case__ : List[str] = input_boxes.numpy().tolist()
if (
not isinstance(lowerCamelCase , lowerCamelCase )
or not isinstance(input_boxes[0] , lowerCamelCase )
or not isinstance(input_boxes[0][0] , lowerCamelCase )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
snake_case__ : List[str] = [np.array(lowerCamelCase ).astype(np.floataa ) for box in input_boxes]
else:
snake_case__ : Any = None
return input_points, input_labels, input_boxes
@property
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(lowerCamelCase ) )
def lowercase__ ( self , *lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.image_processor.post_process_masks(*lowerCamelCase , **lowerCamelCase )
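# The coordinate rescaling done by _normalize_coordinates above, isolated.
# Assumption: points are (x, y) pairs in the original image frame and the
# target frame resizes the longest edge to `longest_edge` (SAM-style).
import numpy as np

def rescale_points(points, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    coords = np.array(points, dtype=np.float64)
    coords[..., 0] *= new_w / old_w
    coords[..., 1] *= new_h / old_h
    return coords

print(rescale_points([[250, 500]], original_size=(1000, 500)))  # [[256. 512.]]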
| 261 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
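# A bare-bones sketch of the lazy-module pattern used above: attributes resolve
# on first access rather than at import time. The real _LazyModule also handles
# submodules, __dir__, and error reporting; this only shows the core idea.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._name_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value

lazy_math = TinyLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(lazy_math.pi))  # math is imported here, on first use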
| 11 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max = True,
    max_x = math.inf,
    min_x = -math.inf,
    max_y = math.inf,
    min_y = -math.inf,
    visualization = False,
    start_temperate = 100,
    rate_of_decrease = 0.01,
    threshold_temp = 1,
) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("""Iterations""" )
        plt.ylabel("""Function values""" )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa(x , y):
        """simple docstring"""
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    def test_fa(x , y):
        """simple docstring"""
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
        f"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
        f"""{local_min.score()}"""
    )
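# The Metropolis-style acceptance rule the loop above applies, isolated: a
# worsening move (change < 0) is still taken with probability e^(change / T),
# which shrinks as the temperature decays.
import math
import random

def accept(change, temperature):
    if change > 0:
        return True
    return random.random() < math.e ** (change / temperature)

for temp in (100.0, 10.0, 1.0):
    taken = sum(accept(-5.0, temp) for _ in range(10_000))
    print(f"T={temp}: accepted a -5 move {taken / 10_000:.1%} of the time")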
| 582 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product = "laptop"):
    """simple docstring"""
    url = F'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url , headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ])
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
        try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
            try:
                product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                            - float(product_price.strip('''₹''').replace(''',''' , ''''''))
                        )
                        / float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                    )
                    * 100)
            except ValueError:
                discount = float('''nan''')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = ''' '''
        product_mrp = ''' '''
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
lowercase_ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
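# The price parsing and discount arithmetic from the loop above, isolated.
# Assumption: prices arrive as strings such as "₹1,999".
def parse_inr(price):
    return float(price.strip("₹").replace(",", ""))

def discount_percent(mrp, price):
    return (mrp - price) / mrp * 100

mrp, price = parse_inr("₹1,999"), parse_inr("₹1,499")
print(f"{discount_percent(mrp, price):.2f}% off")  # 25.01% off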
| 11 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
    """simple docstring"""
    def __init__( self ):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class PreForwardHook( ModelHook ):
    """simple docstring"""
    def pre_forward( self , module , *args , **kwargs ):
        '''simple docstring'''
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
    """simple docstring"""
    def post_forward( self , module , output ):
        '''simple docstring'''
        return output + 1
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = ModelForTest()
A__ = ModelHook()
add_hook_to_module(lowercase , lowercase )
self.assertEqual(test_model._hf_hook , lowercase )
self.assertTrue(hasattr(lowercase , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(lowercase )
self.assertFalse(hasattr(lowercase , "_hf_hook" ) )
self.assertFalse(hasattr(lowercase , "_old_forward" ) )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = ModelForTest()
A__ = ModelHook()
add_hook_to_module(lowercase , lowercase )
add_hook_to_module(lowercase , lowercase , append=lowercase )
self.assertEqual(isinstance(test_model._hf_hook , lowercase ) , lowercase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(lowercase , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(lowercase )
self.assertFalse(hasattr(lowercase , "_hf_hook" ) )
self.assertFalse(hasattr(lowercase , "_old_forward" ) )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = ModelForTest()
A__ = torch.randn(2 , 3 )
A__ = test_model(x + 1 )
A__ = test_model(x + 2 )
A__ = PreForwardHook()
add_hook_to_module(lowercase , lowercase )
A__ = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A__ = PreForwardHook()
add_hook_to_module(lowercase , lowercase )
A__ = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A__ = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(lowercase , lowercase )
A__ = test_model(lowercase )
assert torch.allclose(lowercase , lowercase , atol=1e-5 )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = ModelForTest()
A__ = torch.randn(2 , 3 )
A__ = test_model(lowercase )
A__ = PostForwardHook()
add_hook_to_module(lowercase , lowercase )
A__ = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A__ = PostForwardHook()
add_hook_to_module(lowercase , lowercase )
A__ = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A__ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(lowercase , lowercase )
A__ = test_model(lowercase )
assert torch.allclose(lowercase , output + 2 , atol=1e-5 )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = ModelForTest()
A__ = torch.randn(2 , 3 )
A__ = test_model(lowercase )
A__ = PostForwardHook()
add_hook_to_module(lowercase , lowercase )
A__ = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
A__ = True
A__ = test_model(lowercase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
A__ = torch.randn(2 , 3 )
A__ = model(lowercase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowercase , AlignDevicesHook(io_same_device=lowercase ) )
A__ = torch.randn(2 , 3 ).to(0 )
A__ = model(lowercase )
self.assertEqual(output.device , torch.device(0 ) )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
A__ = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
A__ = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , lowercase )
A__ = torch.randn(2 , 3 )
A__ = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
A__ = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
A__ = torch.randn(2 , 3 )
A__ = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
A__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(lowercase , execution_device=lowercase , offload=lowercase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
A__ = torch.device(lowercase )
self.assertEqual(model.batchnorm.running_mean.device , lowercase )
A__ = torch.randn(2 , 3 )
A__ = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowercase , execution_device=lowercase , offload=lowercase , offload_buffers=lowercase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
A__ = torch.randn(2 , 3 )
A__ = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
A__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
lowercase , execution_device=lowercase , offload=lowercase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
A__ = torch.device(lowercase )
self.assertEqual(model.batchnorm.running_mean.device , lowercase )
A__ = torch.randn(2 , 3 )
A__ = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowercase , execution_device=lowercase , offload=lowercase , weights_map=model.state_dict() , offload_buffers=lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
A__ = torch.randn(2 , 3 )
A__ = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
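# A stripped-down illustration of the mechanism behind add_hook_to_module
# above: stash the original forward, then route calls through the hook. This
# is a sketch of the idea, not accelerate's actual implementation.
import torch
import torch.nn as nn

class AddOnePreHook:
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

def add_hook(module, hook):
    module._old_forward = module.forward  # keep a handle on the original
    def new_forward(*args, **kwargs):
        args, kwargs = hook.pre_forward(module, *args, **kwargs)
        return module._old_forward(*args, **kwargs)
    module.forward = new_forward  # nn.Module.__call__ now hits the wrapper

layer = nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = layer(x + 1)  # computed before hooking; the weights do not change
add_hook(layer, AddOnePreHook())
assert torch.allclose(layer(x), expected, atol=1e-5)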
| 514 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
if isinstance(__A , torch.Tensor):
return image
elif isinstance(__A , PIL.Image.Image):
_a = [image]
if isinstance(image[0] , PIL.Image.Image):
_a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
_a = np.concatenate(__A , axis=0)
_a = np.array(__A).astype(np.floataa) / 2_55.0
_a = image.transpose(0 , 3 , 1 , 2)
_a = 2.0 * image - 1.0
_a = torch.from_numpy(__A)
elif isinstance(image[0] , torch.Tensor):
_a = torch.cat(__A , dim=0)
return image
def lowerCAmelCase (__A , __A , __A , __A=0.99_95):
"""simple docstring"""
if not isinstance(__A , np.ndarray):
_a = True
_a = va.device
_a = va.cpu().numpy()
_a = va.cpu().numpy()
_a = np.sum(va * va / (np.linalg.norm(__A) * np.linalg.norm(__A)))
if np.abs(__A) > DOT_THRESHOLD:
_a = (1 - t) * va + t * va
else:
_a = np.arccos(__A)
_a = np.sin(__A)
_a = theta_a * t
_a = np.sin(__A)
_a = np.sin(theta_a - theta_t) / sin_theta_a
_a = sin_theta_t / sin_theta_a
_a = sa * va + sa * va
if inputs_are_torch:
_a = torch.from_numpy(__A).to(__A)
return va
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = F.normalize(__A , dim=-1)
_a = F.normalize(__A , dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def lowerCAmelCase (__A , __A):
"""simple docstring"""
for param in model.parameters():
_a = value
class CLIPGuidedImagesMixingStableDiffusion( DiffusionPipeline ):
    '''simple docstring'''
def __init__(self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
_a = (
feature_extractor.size
if isinstance(feature_extractor.size , A )
else feature_extractor.size['''shortest_edge''']
)
_a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def a__ (self , A = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.enable_attention_slicing(A )
def a__ (self ) -> int:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self ) -> str:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self , A , A , A ) -> Optional[Any]:
"""simple docstring"""
_a = min(int(num_inference_steps * strength ) , A )
_a = max(num_inference_steps - init_timestep , 0 )
_a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def a__ (self , A , A , A , A , A , A=None ) -> List[str]:
"""simple docstring"""
if not isinstance(A , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(A )}''' )
_a = image.to(device=A , dtype=A )
if isinstance(A , A ):
_a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
_a = torch.cat(A , dim=0 )
else:
_a = self.vae.encode(A ).latent_dist.sample(A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 0.18215 * init_latents
_a = init_latents.repeat_interleave(A , dim=0 )
_a = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
_a = self.scheduler.add_noise(A , A , A )
_a = init_latents
return latents
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def a__ (self , A , A ) -> List[Any]:
"""simple docstring"""
_a = self.feature_extractor.preprocess(A )
_a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__ (self , A , A , A , A , A , A , A , ) -> Union[str, Any]:
"""simple docstring"""
_a = latents.detach().requires_grad_()
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_a = self.scheduler.alphas_cumprod[timestep]
_a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_a = torch.sqrt(A )
_a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , A ):
_a = self.scheduler.sigmas[index]
_a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * sample
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = transforms.Resize(self.feature_extractor_size )(A )
_a = self.normalize(A ).to(latents.dtype )
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
_a = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , A ):
_a = latents.detach() + grads * (sigma**2)
_a = noise_pred_original
else:
_a = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self , A , A , A = None , A = None , A = 512 , A = 512 , A = 0.6 , A = 50 , A = 7.5 , A = 1 , A = 0.0 , A = 100 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
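# A quick numeric check of the slerp helper defined at the top of this
# pipeline, restated with readable names: the endpoints are recovered at
# t = 0 and t = 1, and unit vectors stay (approximately) unit length.
import numpy as np

def slerp_np(t, v0, v1, dot_threshold=0.9995):
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    if np.abs(dot) > dot_threshold:  # nearly parallel: plain lerp is fine
        return (1 - t) * v0 + t * v1
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
for t in (0.0, 0.5, 1.0):
    v = slerp_np(t, a, b)
    print(t, v, np.linalg.norm(v))  # the norm stays 1.0 along the arc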
| 11 | 0 |
import sys
import turtle
def get_mid(p1, p2):
    '''simple docstring'''
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    '''simple docstring'''
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
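# Each recursion level replaces a triangle with three half-size copies, so a
# call at depth d draws 1 + 3 + ... + 3^d = (3^(d+1) - 1) / 2 outlines in
# total. A quick check of that count, no turtle needed:
def outlines_drawn(depth):
    if depth == 0:
        return 1
    return 1 + 3 * outlines_drawn(depth - 1)

for d in range(5):
    assert outlines_drawn(d) == (3 ** (d + 1) - 1) // 2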
| 393 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = CTRLTokenizer
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Any = False
def a__ (self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_a = dict(zip(A , range(len(A ) ) ) )
_a = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def a__ (self , **A ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A )
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = '''adapt react readapt apt'''
_a = '''adapt react readapt apt'''
return input_text, output_text
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''adapt react readapt apt'''
_a = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_a = tokenizer.tokenize(A )
self.assertListEqual(A , A )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
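# Illustrative, self-contained recap of the fixture above: ids follow the toy
# vocab order, and pieces outside the vocab map to "<unk>" (id 6). This only
# re-derives the expected ids by hand; it does not exercise the tokenizer.
toy_vocab = {"adapt": 0, "re@@": 1, "a@@": 2, "apt": 3, "c@@": 4, "t": 5, "<unk>": 6}
toy_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split() + ["<unk>"]
assert [toy_vocab[t] for t in toy_tokens] == [0, 1, 2, 4, 5, 1, 0, 3, 6]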
| 11 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
with open(lowercase_ ) as metadata_file:
lowercase__ : Any = json.load(lowercase_ )
lowercase__ : Tuple = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
lowercase__ : List[str] = torch.load(lowercase_ , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
lowercase__ : Any = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
lowercase__ : int = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowercase__ : str = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase__ : Any = AddedToken("""<ent>""" , lstrip=lowercase_ , rstrip=lowercase_ )
lowercase__ : List[Any] = AddedToken("""<ent2>""" , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , """tokenizer_config.json""" ) , """r""" ) as f:
lowercase__ : Tuple = json.load(lowercase_ )
lowercase__ : Tuple = """MLukeTokenizer"""
with open(os.path.join(lowercase_ , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
with open(os.path.join(lowercase_ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
lowercase__ : str = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
lowercase__ : int = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
lowercase__ : int = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
lowercase__ : int = state_dict["""embeddings.word_embeddings.weight"""]
lowercase__ : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
lowercase__ : Optional[Any] = word_emb[enta_init_index].unsqueeze(0 )
lowercase__ : int = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowercase__ : Any = state_dict[bias_name]
lowercase__ : Union[str, Any] = decoder_bias[ent_init_index].unsqueeze(0 )
lowercase__ : List[str] = decoder_bias[enta_init_index].unsqueeze(0 )
lowercase__ : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase__ : List[str] = F'encoder.layer.{layer_index}.attention.self.'
lowercase__ : Any = state_dict[prefix + matrix_name]
lowercase__ : int = state_dict[prefix + matrix_name]
lowercase__ : Dict = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase__ : Optional[int] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
lowercase__ : Union[str, Any] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
lowercase__ : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowercase__ : List[str] = state_dict["""entity_predictions.bias"""]
lowercase__ : List[Any] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
lowercase__ : List[str] = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowercase__ : Dict = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
lowercase__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
lowercase__ : Dict = state_dict[key]
else:
lowercase__ : Optional[Any] = state_dict[key]
lowercase__ , lowercase__ : int = model.load_state_dict(lowercase_ , strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowercase__ : Union[str, Any] = MLukeTokenizer.from_pretrained(lowercase_ , task="""entity_classification""" )
lowercase__ : List[Any] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
lowercase__ : List[str] = (0, 9)
lowercase__ : List[Any] = tokenizer(lowercase_ , entity_spans=[span] , return_tensors="""pt""" )
lowercase__ : Dict = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase__ : Tuple = torch.Size((1, 33, 7_68) )
lowercase__ : Any = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase__ : List[str] = torch.Size((1, 1, 7_68) )
lowercase__ : List[str] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
lowercase__ : Any = MLukeTokenizer.from_pretrained(lowercase_ )
lowercase__ : int = """Tokyo is the capital of <mask>."""
lowercase__ : List[str] = (24, 30)
lowercase__ : Union[str, Any] = tokenizer(lowercase_ , entity_spans=[span] , return_tensors="""pt""" )
lowercase__ : str = model(**lowercase_ )
lowercase__ : Optional[int] = encoding["""input_ids"""][0].tolist()
lowercase__ : Optional[int] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
lowercase__ : List[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
lowercase__ : str = outputs.entity_logits[0][0].argmax().item()
lowercase__ : List[str] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
lowercase__ : Optional[Any] = ["""[MASK]""", """[PAD]""", """[UNK]"""]
lowercase__ : Optional[int] = [json.loads(lowercase_ ) for line in open(lowercase_ )]
lowercase__ : str = {}
for entry in data:
lowercase__ : Tuple = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
lowercase__ : str = entity_id
break
lowercase__ : Dict = F'{language}:{entity_name}'
lowercase__ : Dict = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowerCamelCase__ : List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
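# Toy illustration (assumed data layout) of the JSON-lines entity vocab the
# converter above reads: one JSON object per line with an "id" and a list of
# (entity_name, language) pairs; special tokens keep their bare name, every
# other entry is keyed as "language:name".
_toy_lines = [
    json.dumps({"id": 0, "entities": [["[MASK]", None]]}),
    json.dumps({"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}),
]
_toy_mapping = {}
for _entry in (json.loads(_line) for _line in _toy_lines):
    for _name, _lang in _entry["entities"]:
        _key = _name if _name in ("[MASK]", "[PAD]", "[UNK]") else f"{_lang}:{_name}"
        _toy_mapping[_key] = _entry["id"]
assert _toy_mapping == {"[MASK]": 0, "en:Japan": 1, "ja:日本": 1}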
| 12 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__)
class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCAmelCase : bool = None
__lowerCAmelCase : bool = None
class _snake_case ( folder_based_builder.FolderBasedBuilder ):
__lowerCAmelCase : Optional[Any] = datasets.Audio()
__lowerCAmelCase : Union[str, Any] = 'audio'
__lowerCAmelCase : str = AudioFolderConfig
__lowerCAmelCase : List[str] # definition at the bottom of the script
__lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' )
lowerCamelCase__ : int = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
lowerCamelCase__ : int = AUDIO_EXTENSIONS
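# Small, self-contained sketch of how an extension allow-list like the one
# above is typically applied when scanning a folder (file names are made up):
from pathlib import Path
_exts = {".wav", ".mp3", ".flac", ".ogg"}
_files = [Path("a.wav"), Path("notes.txt"), Path("b.MP3")]
_audio_files = [f for f in _files if f.suffix.lower() in _exts]
assert [f.name for f in _audio_files] == ["a.wav", "b.MP3"]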
| 12 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : int = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
lowerCamelCase__ : Dict = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
lowerCamelCase__ : str = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : List[str] = 'whisper'
__lowerCAmelCase : str = ['past_key_values']
__lowerCAmelCase : Optional[int] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE_=5_18_65 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=5_02_57 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=15_00 , SCREAMING_SNAKE_CASE_=4_48 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[2_20, 5_02_56] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : str = vocab_size
lowercase__ : Tuple = num_mel_bins
lowercase__ : int = d_model
lowercase__ : str = encoder_layers
lowercase__ : str = encoder_attention_heads
lowercase__ : Any = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : Dict = encoder_ffn_dim
lowercase__ : Dict = dropout
lowercase__ : Dict = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : List[str] = activation_function
lowercase__ : Dict = init_std
lowercase__ : int = encoder_layerdrop
lowercase__ : List[str] = decoder_layerdrop
lowercase__ : List[str] = use_cache
lowercase__ : Tuple = encoder_layers
lowercase__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ : int = max_source_positions
lowercase__ : Tuple = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowercase__ : Tuple = classifier_proj_size
lowercase__ : Optional[int] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : Optional[int] = apply_spec_augment
lowercase__ : Any = mask_time_prob
lowercase__ : str = mask_time_length
lowercase__ : int = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : List[Any] = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
lowercase__ : Dict = median_filter_width
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class _snake_case ( UpperCAmelCase_ ):
@property
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
])
if self.use_past:
lowercase__ : List[str] = {0: """batch"""}
else:
lowercase__ : int = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction="""inputs""")
return common_inputs
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 2_20_50 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 2_20 , ):
'''simple docstring'''
lowercase__ : str = OrderedDict()
lowercase__ : List[Any] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Dict = encoder_inputs["""input_features"""].shape[2]
lowercase__ : List[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
lowercase__ : Optional[Any] = super().generate_dummy_inputs(
preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = encoder_inputs.pop("""input_features""")
lowercase__ : Optional[Any] = decoder_inputs.pop("""decoder_input_ids""")
if "past_key_values" in decoder_inputs:
lowercase__ : Dict = decoder_inputs.pop("""past_key_values""")
return dummy_inputs
@property
def lowercase__ ( self):
'''simple docstring'''
return 1E-3
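# Illustrative sketch of what a suppress-token list like the ones above is for:
# before sampling, logits at the suppressed vocabulary ids are set to -inf so
# those tokens can never be generated. The toy vocab size of 10 is assumed.
import torch
_logits = torch.zeros(1, 10)
_suppress = [2, 7]
_logits[:, _suppress] = float("-inf")
assert torch.isinf(_logits[0, 2]) and torch.isinf(_logits[0, 7])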
| 12 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : int = (DDPMScheduler,)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**SCREAMING_SNAKE_CASE_)
return config
def lowercase__ ( self):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.dummy_model()
lowercase__ : List[Any] = self.dummy_sample_deter
lowercase__ : str = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : str = pred_prev_sample
lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""")
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter
lowercase__ : int = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : Tuple = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE_):
if i == len(SCREAMING_SNAKE_CASE_) - 1:
lowercase__ : Optional[int] = -1
else:
lowercase__ : Tuple = timesteps[i + 1]
lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_)
lowercase__ : int = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = [1_00, 87, 50, 1, 0]
lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_)
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
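# Minimal end-to-end sketch of the scheduler loop the tests above exercise.
# The random "model output" is a stand-in for a real denoising network; kept
# in a function so importing this module does not run it.
def _ddpm_demo():
    sched = DDPMScheduler(num_train_timesteps=1000)
    sched.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in sched.timesteps:
        model_output = torch.randn_like(sample)  # placeholder for unet(sample, t)
        sample = sched.step(model_output, t, sample).prev_sample
    return sample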
| 12 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : Dict = IFPipeline
__lowerCAmelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
__lowerCAmelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCAmelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowercase__ ( self):
'''simple docstring'''
return self._get_dummy_components()
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE_).startswith("""mps"""):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_)
else:
lowercase__ : int = torch.Generator(device=SCREAMING_SNAKE_CASE_).manual_seed(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def lowercase__ ( self):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1)
def lowercase__ ( self):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def lowercase__ ( self):
'''simple docstring'''
self._test_save_load_local()
def lowercase__ ( self):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowercase__ ( self):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
lowercase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
lowercase__ , lowercase__ : Dict = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase__ : str = None
lowercase__ : Optional[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase__ : List[Any] = IFImgaImgPipeline(**pipe_a.components)
lowercase__ : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase__ : str = IFInpaintingPipeline(**pipe_a.components)
lowercase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
_start_torch_memory_measurement()
lowercase__ : str = torch.Generator(device="""cpu""").manual_seed(0)
lowercase__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , )
lowercase__ : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
lowercase__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type="""np""" , )
lowercase__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
_start_torch_memory_measurement()
lowercase__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : int = torch.Generator(device="""cpu""").manual_seed(0)
lowercase__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , )
lowercase__ : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase__ : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
lowercase__ : Dict = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , original_image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type="""np""" , )
lowercase__ : int = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
_start_torch_memory_measurement()
lowercase__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : str = torch.Generator(device="""cpu""").manual_seed(0)
lowercase__ : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , mask_image=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , )
lowercase__ : Any = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : List[str] = torch.Generator(device="""cpu""").manual_seed(0)
lowercase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1)).to(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , mask_image=SCREAMING_SNAKE_CASE_ , original_image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type="""np""" , )
lowercase__ : Optional[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
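# Standalone sketch of the CUDA peak-memory measurement pattern used above
# (guarded so it is a no-op on CPU-only machines):
import torch
if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    _x = torch.randn(1024, 1024, device="cuda")
    _peak_bytes = torch.cuda.max_memory_allocated()
    assert _peak_bytes >= _x.numel() * _x.element_size()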
| 12 |
def UpperCamelCase ( lowercase_ ) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
lowercase__ : int = sum(lowercase_ ) / len(lowercase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
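# Worked example of the mean absolute deviation computed above: for [2, 4, 6]
# the mean is 4 and the absolute deviations are 2, 0, 2, so the result is 4/3.
_nums = [2, 4, 6]
_avg = sum(_nums) / len(_nums)
assert sum(abs(x - _avg) for x in _nums) / len(_nums) == 4 / 3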
| 12 | 1 |
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
lowercase__ : str = 0
while number:
        # Clearing the lowest set bit (number &= number - 1) jumps straight to
        # the next 1, so the loop runs once per set bit rather than once per
        # bit position (at most popcount(number) iterations, never a fixed 32).
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
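# Quick cross-check of Kernighan's trick above against Python's built-in
# popcount (a self-contained re-statement of the same loop):
def _kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # clear the lowest set bit
        count += 1
    return count
assert all(_kernighan(n) == bin(n).count("1") for n in (0, 1, 7, 10, 255, 1024))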
| 12 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Any = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = do_rescale
lowercase__ : List[Any] = rescale_factor
lowercase__ : Tuple = do_pad
lowercase__ : Optional[Any] = pad_size
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ , lowercase__ : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = (old_height // size + 1) * size - old_height
lowercase__ : str = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[Any] = pad_size if pad_size is not None else self.pad_size
lowercase__ : str = make_list_of_images(SCREAMING_SNAKE_CASE_)
if not valid_images(SCREAMING_SNAKE_CASE_):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
# All transformations expect numpy arrays.
lowercase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images]
if do_pad:
lowercase__ : List[str] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Dict = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
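# Numeric illustration of the padding rule above: each spatial dimension is
# padded up to the next multiple of `size`. Note it always adds a full extra
# block, even when the input is already an exact multiple.
_size = 8
_old_height, _old_width = 20, 16
_pad_height = (_old_height // _size + 1) * _size - _old_height  # 24 - 20 = 4
_pad_width = (_old_width // _size + 1) * _size - _old_width  # 24 - 16 = 8
assert (_pad_height, _pad_width) == (4, 8)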
| 12 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(lowercase_ , lowercase_ , lowercase_=0 , lowercase_=None ):
lowercase__ : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowercase__ : List[str] = math.floor(val / multiple ) * multiple
if x < min_val:
lowercase__ : Dict = math.ceil(val / multiple ) * multiple
return x
lowercase__ : Optional[int] = (output_size, output_size) if isinstance(lowercase_ , lowercase_ ) else output_size
lowercase__ , lowercase__ : List[Any] = get_image_size(lowercase_ )
lowercase__ , lowercase__ : Union[str, Any] = output_size
# determine new height and width
lowercase__ : Optional[int] = output_height / input_height
lowercase__ : Optional[int] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowercase__ : Tuple = scale_width
else:
# fit height
lowercase__ : List[Any] = scale_height
lowercase__ : Optional[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase_ )
lowercase__ : List[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase_ )
return (new_height, new_width)
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : List[str] = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
lowercase__ : Tuple = get_size_dict(SCREAMING_SNAKE_CASE_)
lowercase__ : str = do_resize
lowercase__ : List[Any] = size
lowercase__ : List[Any] = keep_aspect_ratio
lowercase__ : Dict = ensure_multiple_of
lowercase__ : Optional[int] = resample
lowercase__ : Optional[int] = do_rescale
lowercase__ : str = rescale_factor
lowercase__ : Tuple = do_normalize
lowercase__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Any = get_size_dict(SCREAMING_SNAKE_CASE_)
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}')
lowercase__ : Tuple = get_resize_output_image_size(
SCREAMING_SNAKE_CASE_ , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=SCREAMING_SNAKE_CASE_ , multiple=SCREAMING_SNAKE_CASE_ , )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : str = do_resize if do_resize is not None else self.do_resize
lowercase__ : Dict = size if size is not None else self.size
lowercase__ : str = get_size_dict(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase__ : Dict = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase__ : Union[str, Any] = resample if resample is not None else self.resample
lowercase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : str = image_mean if image_mean is not None else self.image_mean
lowercase__ : int = image_std if image_std is not None else self.image_std
lowercase__ : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_)
if not valid_images(SCREAMING_SNAKE_CASE_):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
lowercase__ : Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images]
if do_resize:
lowercase__ : str = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_) for image in images]
if do_rescale:
lowercase__ : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images]
if do_normalize:
lowercase__ : List[str] = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Dict = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_) != len(SCREAMING_SNAKE_CASE_):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""")
if is_torch_tensor(SCREAMING_SNAKE_CASE_):
lowercase__ : List[Any] = target_sizes.numpy()
lowercase__ : List[Any] = []
for idx in range(len(SCREAMING_SNAKE_CASE_)):
lowercase__ : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(SCREAMING_SNAKE_CASE_)
else:
lowercase__ : Dict = logits.argmax(dim=1)
lowercase__ : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
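# Standalone illustration of the "constrain to a multiple" rounding used in
# get_resize_output_image_size above: values round to the nearest multiple,
# falling back to floor/ceil when a max/min bound would be violated.
def _constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x
assert _constraint_to_multiple_of(383.5, 32) == 384
assert _constraint_to_multiple_of(383.5, 32, max_val=383) == 352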
| 12 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCamelCase__ : Optional[int] = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def UpperCamelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : List[Any] = True
while ask_again:
lowercase__ : Tuple = input(lowercase_ )
try:
if default is not None and len(lowercase_ ) == 0:
return default
return convert_value(lowercase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_=[] , lowercase_=None , lowercase_=0 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : List[Any] = BulletMenu(lowercase_ , lowercase_ )
lowercase__ : Any = menu.run(default_choice=lowercase_ )
return convert_value(lowercase_ ) if convert_value is not None else result
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Union[str, Any] = int(lowercase_ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : List[str] = int(lowercase_ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : str = int(lowercase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : List[Any] = int(lowercase_ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : List[Any] = int(lowercase_ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _snake_case ( argparse.RawDescriptionHelpFormatter ):
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = usage.replace("""<command> [<args>] """ , """""")
return usage
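# Tiny sketch of the converter pattern above: the prompt helpers pass the raw
# input string through a `convert_value` callable so bad input can be retried.
# Here one converter is exercised directly, without any console I/O.
def _convert_yes_no(value: str) -> bool:
    return {"yes": True, "no": False}[value.lower()]
assert _convert_yes_no("YES") is True and _convert_yes_no("no") is False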
| 12 | 1 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
lowerCamelCase__ : Optional[int] = logging.getLogger(__name__)
lowerCamelCase__ : int = {"""facebook/bart-base""": BartForConditionalGeneration}
lowerCamelCase__ : List[Any] = {"""facebook/bart-base""": BartTokenizer}
def UpperCamelCase ( ) -> Dict:
'''simple docstring'''
lowercase__ : Union[str, Any] = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=lowercase_ , default=lowercase_ , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=lowercase_ , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=lowercase_ , default=lowercase_ , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase_ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase_ , )
parser.add_argument(
"""--config_name""" , type=lowercase_ , default=lowercase_ , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=lowercase_ , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=lowercase_ , default=lowercase_ , help="""Where to store the final ONNX file.""" )
lowercase__ : str = parser.parse_args()
return args
def UpperCamelCase ( lowercase_ , lowercase_="cpu" ) -> Any:
'''simple docstring'''
lowercase__ : Optional[Any] = model_dict[model_name].from_pretrained(lowercase_ ).to(lowercase_ )
lowercase__ : Tuple = tokenizer_dict[model_name].from_pretrained(lowercase_ )
if model_name in ["facebook/bart-base"]:
lowercase__ : Optional[Any] = 0
lowercase__ : Optional[int] = None
lowercase__ : List[Any] = 0
return huggingface_model, tokenizer
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
model.eval()
lowercase__ : int = None
lowercase__ : Any = torch.jit.script(BARTBeamSearchGenerator(lowercase_ ) )
with torch.no_grad():
lowercase__ : Union[str, Any] = """My friends are cool but they eat too many carbs."""
lowercase__ : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors="""pt""" ).to(model.device )
lowercase__ : Optional[int] = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=lowercase_ , max_length=lowercase_ , early_stopping=lowercase_ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowercase_ , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowercase_ , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=lowercase_ , )
logger.info("""Model exported to {}""".format(lowercase_ ) )
lowercase__ : str = remove_dup_initializers(os.path.abspath(lowercase_ ) )
logger.info("""Deduplicated and optimized model written to {}""".format(lowercase_ ) )
lowercase__ : Optional[Any] = onnxruntime.InferenceSession(lowercase_ )
lowercase__ : Optional[int] = ort_sess.run(
lowercase_ , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(lowercase_ ),
"""max_length""": np.array(lowercase_ ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
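# A sketch of consuming the exported file directly with ONNX Runtime, independent
# of the validation above. The file name "BART.onnx" matches the default output
# above; the decoder_start_token_id value must come from the model config (2 for
# facebook/bart-base). Both are assumptions for illustration:
#
#   import numpy as np
#   import onnxruntime
#   from transformers import BartTokenizer
#
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   sess = onnxruntime.InferenceSession("BART.onnx")
#   enc = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="np")
#   (output_ids,) = sess.run(
#       None,
#       {
#           "input_ids": enc["input_ids"],
#           "attention_mask": enc["attention_mask"],
#           "num_beams": np.array(4),
#           "max_length": np.array(5),
#           "decoder_start_token_id": np.array(2),
#       },
#   )
#   print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))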
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
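# With the lazy module wired up, the public classes resolve on first access; a
# minimal usage sketch (alibaba-damo/mgp-str-base is the published MGP-STR
# checkpoint, assumed available on the Hub):
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")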
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
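# A quick sanity check outside the test harness; unc-nlp/lxmert-base-uncased is
# assumed to be the public LXMERT checkpoint (the test above builds its own toy
# vocab instead):
#
#   from transformers import LxmertTokenizer
#
#   tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
#   print(tokenizer.tokenize("UNwant\u00E9d,running"))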
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
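# A minimal sketch of using the recording callback outside the unittest harness,
# reusing the regression helpers imported above (the output directory name is an
# assumption for illustration):
def run_callback_demo():
    config = RegressionModelConfig(a=0, b=0)
    model = RegressionPreTrainedModel(config)
    args = TrainingArguments("callback_demo_output", disable_tqdm=True, report_to=[])
    trainer = Trainer(
        model,
        args,
        train_dataset=RegressionDataset(length=64),
        callbacks=[MyTestTrainerCallback],
    )
    trainer.train()
    # the recording callback sits just before the progress/printer callback, hence [-2]
    print(trainer.callback_handler.callbacks[-2].events)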
# Imports
import numpy as np
class IndexCalculation:
    """
    Compute common vegetation indices (NDVI, EVI, SAVI variants, ...) from the
    reflectance bands handed to the constructor or to `calculation`.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
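# A small usage example with synthetic 2x2 reflectance bands (the values are
# illustrative, not measured data):
if __name__ == "__main__":
    red_band = np.array([[0.20, 0.30], [0.25, 0.35]])
    nir_band = np.array([[0.60, 0.65], [0.70, 0.60]])
    cl = IndexCalculation(red=red_band, nir=nir_band)
    print(cl.calculation("NDVI"))  # dispatch through the index table
    print(cl.ndvi())  # or call the method directly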
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
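# An end-to-end sketch against real weights; weiweishi/roc-bert-base-zh is assumed
# to be the published RoCBert checkpoint, and the shape/pronunciation keys are the
# extra model inputs this tokenizer is expected to produce:
#
#   from transformers import RoCBertTokenizer
#
#   tok = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
#   enc = tok("你好", return_tensors="pt")
#   print(enc["input_ids"], enc["input_shape_ids"], enc["input_pronunciation_ids"])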
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
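# A minimal inference sketch with safe latent diffusion outside the test harness;
# the checkpoint and the sld_* parameters mirror the integration tests above, the
# prompt is illustrative, and a CUDA device is assumed:
#
#   import torch
#   from diffusers import StableDiffusionPipelineSafe
#
#   pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe = pipe.to("cuda")
#   generator = torch.manual_seed(0)
#   image = pipe(
#       "portrait photo of a person, detailed",
#       generator=generator,
#       sld_guidance_scale=2000,
#   ).images[0]
#   image.save("sld_sample.png")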
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero-shot object detection pipeline: predicts bounding boxes for an image
    given a set of free-form `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image"], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
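# The user-facing entry point is `pipeline`; google/owlvit-base-patch32 is the
# usual zero-shot detector checkpoint (an assumption here), and the COCO image
# URL is illustrative:
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   predictions = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#       threshold=0.1,
#   )
#   print(predictions)  # list of {"score", "label", "box"} dicts, highest score first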
def binary_count_setbits(a: int) -> int:
    """
    Return the number of set bits (1s) in the binary representation of `a`.

    >>> binary_count_setbits(25)  # 0b11001
    3
    >>> binary_count_setbits(36)  # 0b100100
    2
    >>> binary_count_setbits(16)  # 0b10000
    1
    """
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
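# An equivalent alternative using Brian Kernighan's trick: `a &= a - 1` clears the
# lowest set bit, so the loop runs once per set bit instead of once per digit.
def binary_count_setbits_kernighan(a: int) -> int:
    count = 0
    while a:
        a &= a - 1  # drop the lowest set bit
        count += 1
    return count


assert binary_count_setbits_kernighan(25) == 3  # 0b11001 has three set bits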
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
lowercase__ : str = mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ )
else:
lowercase__ : List[str] = max(
mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) , mf_knapsack(i - 1 , lowercase_ , lowercase_ , j - wt[i - 1] ) + val[i - 1] , )
lowercase__ : List[Any] = val
return f[i][j]
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Any = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
lowercase__ : List[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
lowercase__ : Tuple = dp[i - 1][w_]
return dp[n][w_], dp
def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards and record which items were taken."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
lowerCamelCase__ : Dict = [3, 2, 4, 4]
lowerCamelCase__ : List[Any] = [4, 3, 2, 3]
lowerCamelCase__ : Optional[int] = 4
lowerCamelCase__ : Dict = 6
lowerCamelCase__ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowerCamelCase__ , lowerCamelCase__ : int = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 12 | 1 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    """Schur complement of the block matrix [[A, B], [B^T, C]]: S = C - B^T A^{-1} B."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
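# Minimal usage sketch (illustrative values):
#   A = np.array([[2.0, 0.0], [0.0, 2.0]])
#   B = np.array([[1.0], [0.0]])
#   C = np.array([[3.0]])
#   S = schur_complement(A, B, C)   # C - B^T A^{-1} B = [[2.5]]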
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        # det([[A, B], [B^T, C]]) = det(A) * det(S)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        # A has 2 rows while B has 3, so the row check must fail
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 12 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # sum all parameter values, skipping the double-counted encoder.embeddings entries
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename checkpoint keys to the Hugging Face FLAVA naming scheme."""
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        # codebook weights live under the image_codebook.* prefix in the HF model
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
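# Behavior sketch (illustrative): the upgrade is plain string substitution, e.g.
#   sd = {"mm_text_projection.weight": torch.zeros(1)}
#   upgrade_state_dict(sd, {})
#   # -> {"flava.text_to_mm_projection.weight": tensor([0.])}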
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ : List[str] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 12 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
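# The three-step pattern in the loop tests above (scale input -> predict residual
# -> scheduler.step) is the generic diffusers sampling loop. A minimal sketch
# outside the test harness, assuming a user-supplied model and sample:
#
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(2)
#   for t in scheduler.timesteps:
#       residual = model(scheduler.scale_model_input(sample, t), t)
#       sample = scheduler.step(residual, t, sample).prev_sample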
| 12 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
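# The three test methods above share one pattern: build inputs (PIL / numpy /
# torch), run the image processor, and assert the output shape
# (batch, channels, height, width) for both single and batched inputs.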
| 12 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."})
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."})
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."})
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."})
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."})
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."})
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."})
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."})
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."})
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."})
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."})
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."})
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."})
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File in which the evaluation results are saved."})
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"})
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )
@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."})
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."})
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."})
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."})
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."})
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."})
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."})
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."})
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."})
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."})
@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."})
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."})
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(
        default=32_768, metadata={"help": "Vocab size for the new tokenizer."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."})
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."})
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."})
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."})
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."})
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
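# Usage sketch (illustrative): these dataclasses are meant to be consumed by
# transformers.HfArgumentParser, e.g.
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args_into_dataclasses()[0]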
| 12 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count prize strings for the remaining days, given absences so far and the current late streak."""
    # a second absence or three consecutive lates means this is not a prize string
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    """Project Euler 191: number of 30-day prize strings."""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
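    # Illustrative cross-check (not in the original): for small day counts the
    # cached recursion can be verified against brute-force enumeration of
    # on-time/late/absent strings.
    #
    #   from itertools import product
    #   def brute(days):
    #       return sum(
    #           1 for s in product("OLA", repeat=days)
    #           if s.count("A") < 2 and "LLL" not in "".join(s)
    #       )
    #   assert brute(4) == _calculate(4, 0, 0) == 43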
| 12 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


# modeled after the fairseq Dictionary class
class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file with the format `<symbol> <count>`."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
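# Behavior sketch (illustrative): BPE continuation markers are stripped and
# word-final tokens get a `</w>` suffix, while specials are kept untouched:
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#   # -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}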
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1E-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ : str = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 12 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
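# Typical real-world usage sketch (illustrative): wrap the training entry point so
# it retries with half the batch size whenever a CUDA OOM is raised.
#
#   @find_executable_batch_size(starting_batch_size=64)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the training loop
#   train()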
| 12 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 12 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name
    return name
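# Behavior sketch (illustrative): body keys get renamed and prefixed, e.g. with a
# default config:
#   rename_key("patch_embed.norm.weight", config)
#   # -> "swin2sr.embeddings.patch_embeddings.layernorm.weight"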
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            # NOTE: the q/k/v target key pattern below follows the Hugging Face
            # Swin2SR conversion script; treat it as a reconstruction.
            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] )
lowercase__ : Optional[Any] = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowercase__ : Optional[Any] = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] )
lowercase__ : int = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 )
print("""Looks ok!""" )
lowercase__ : str = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
lowercase__ : str = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
lowerCamelCase__ : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
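# Hedged usage sketch (the script filename is an assumption, not from the source;
# the flags match the argparse definition above):
#     python convert_swin2sr_original_to_pytorch.py \
#         --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#         --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64 \
#         --push_to_hub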
| 12 | 1 |
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
lowercase__ : Union[str, Any] = """"""
while len(lowercase_ ) % 3 != 0:
lowercase__ : Tuple = """0""" + bin_string
lowercase__ : List[str] = [
bin_string[index : index + 3]
for index in range(len(lowercase_ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
lowercase__ : List[Any] = 0
for index, val in enumerate(lowercase_ ):
oct_val += int(2 ** (2 - index) * int(lowercase_ ) )
oct_string += str(lowercase_ )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
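# A hedged, self-contained cross-check of the grouping logic above (names are
# illustrative, not from the source), validated against Python's built-in oct():
def bin_to_oct_reference(bin_string: str) -> str:
    # Left-pad to a multiple of 3 bits, then map each 3-bit group to one octal digit.
    padded = bin_string.zfill((len(bin_string) + 2) // 3 * 3)
    return "".join(str(int(padded[i : i + 3], 2)) for i in range(0, len(padded), 3))

assert bin_to_oct_reference("1010") == "12"  # 0b1010 == 0o12 == oct(10)
assert bin_to_oct_reference("11111111") == "377"  # 0b11111111 == 0o377 == oct(255)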
| 12 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : BigBirdConfig
__lowerCAmelCase : jnp.dtype = jnp.floataa
__lowerCAmelCase : bool = True
def lowercase__ ( self):
'''simple docstring'''
super().setup()
lowercase__ : Dict = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
def cross_entropy(lowercase_ , lowercase_ , lowercase_=None ):
lowercase__ : int = logits.shape[-1]
lowercase__ : List[str] = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype("""f4""" )
lowercase__ : int = jax.nn.log_softmax(lowercase_ , axis=-1 )
lowercase__ : Any = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowercase__ : Optional[int] = reduction(lowercase_ )
return loss
lowercase__ : int = partial(lowercase_ , reduction=jnp.mean )
lowercase__ : Tuple = cross_entropy(lowercase_ , lowercase_ )
lowercase__ : List[Any] = cross_entropy(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = cross_entropy(lowercase_ , lowercase_ )
return (start_loss + end_loss + pooled_loss) / 3
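# A hedged numpy sketch (illustrative names) of the per-term cross-entropy above:
#     onehot = (labels[..., None] == np.arange(num_classes)).astype("f4")
#     loss = np.mean(-np.sum(onehot * log_softmax(logits, axis=-1), axis=-1))
# The three terms (start span, end span, answer category) are then averaged
# with equal weight, matching the final return statement.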
@dataclass
class _snake_case :
__lowerCAmelCase : str = "google/bigbird-roberta-base"
__lowerCAmelCase : int = 3_000
__lowerCAmelCase : int = 10_500
__lowerCAmelCase : int = 128
__lowerCAmelCase : int = 3
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 5
# tx_args
__lowerCAmelCase : float = 3e-5
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 20_000
__lowerCAmelCase : float = 0.0_095
__lowerCAmelCase : str = "bigbird-roberta-natural-questions"
__lowerCAmelCase : str = "training-expt"
__lowerCAmelCase : str = "data/nq-training.jsonl"
__lowerCAmelCase : str = "data/nq-validation.jsonl"
def lowercase__ ( self):
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = os.path.join(self.base_dir , self.save_dir)
lowercase__ : str = self.batch_size_per_device * jax.device_count()
@dataclass
class _snake_case :
__lowerCAmelCase : int
__lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs
def __call__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return batch
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""])
lowercase__ : str = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa),
}
return batch
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))]
while len(SCREAMING_SNAKE_CASE_) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
if seed is not None:
lowercase__ : Any = dataset.shuffle(seed=lowercase_ )
for i in range(len(lowercase_ ) // batch_size ):
lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowercase_ )
@partial(jax.pmap , axis_name="""batch""" )
def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int:
'''simple docstring'''
def loss_fn(lowercase_ ):
lowercase__ : Dict = model_inputs.pop("""start_labels""" )
lowercase__ : List[Any] = model_inputs.pop("""end_labels""" )
lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" )
lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ )
lowercase__ , lowercase__ , lowercase__ : Any = outputs
return state.loss_fn(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ )
lowercase__ : Tuple = jax.value_and_grad(lowercase_ )
lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params )
lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" )
lowercase__ : str = state.apply_gradients(grads=lowercase_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Tuple = model_inputs.pop("""start_labels""" )
lowercase__ : List[str] = model_inputs.pop("""end_labels""" )
lowercase__ : int = model_inputs.pop("""pooled_labels""" )
lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ )
lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs
lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class _snake_case ( train_state.TrainState ):
__lowerCAmelCase : Callable = struct.field(pytree_node=UpperCAmelCase_ )
@dataclass
class _snake_case :
__lowerCAmelCase : Args
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : wandb
__lowerCAmelCase : Callable = None
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : List[str] = model.params
lowercase__ : Dict = TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , )
if ckpt_dir is not None:
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = train_state.TrainState(
step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Optional[Any] = args
lowercase__ : Union[str, Any] = data_collator
lowercase__ : str = lr
lowercase__ : Union[str, Any] = params
lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_)
return state
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = self.args
lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size
lowercase__ : int = jax.random.PRNGKey(0)
lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count())
for epoch in range(args.max_epochs):
lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa)
lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'):
lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
if i % args.logging_steps == 0:
lowercase__ : List[str] = jax_utils.unreplicate(state.step)
lowercase__ : str = running_loss.item() / i
lowercase__ : Tuple = self.scheduler_fn(state_step - 1)
lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE_))
self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size)
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size
lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa)
lowercase__ : Optional[Any] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """):
lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
return running_loss / i
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """)
self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params)
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib"""))
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib"""))
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f:
json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_)
print("""DONE""")
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f:
lowercase__ : Optional[Any] = from_bytes(state.params , f.read() )
with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f:
lowercase__ : Dict = from_bytes(state.opt_state , f.read() )
lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) )
lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) )
with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f:
lowercase__ : int = json.load(lowercase_ )
lowercase__ : Optional[Any] = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
lowercase__ : Optional[int] = num_train_steps - warmup_steps
lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ )
lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ )
lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
def weight_decay_mask(lowercase_ ):
lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ )
lowercase__ : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(lowercase_ )
lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ )
return tx, lr
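# A hedged, self-contained sketch (step counts are illustrative) of the schedule
# shape the helpers above produce: linear warmup to the peak learning rate,
# then linear decay toward zero.
def _example_schedule():
    warmup_fn = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
    decay_fn = optax.linear_schedule(init_value=3e-5, end_value=1E-7, transition_steps=900)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[100])
    return lr(0), lr(100), lr(1000)  # 0.0 -> 3e-5 -> ~1e-7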
| 12 | 1 |
from __future__ import annotations
import time
lowerCamelCase__ : List[str] = list[tuple[int, int]]
lowerCamelCase__ : str = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ : List[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[int] = pos_x
lowercase__ : Optional[int] = pos_y
lowercase__ : List[Any] = (pos_y, pos_x)
lowercase__ : Optional[Any] = goal_x
lowercase__ : Union[str, Any] = goal_y
lowercase__ : Any = parent
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = Node(start[1] , start[0] , goal[1] , goal[0] , SCREAMING_SNAKE_CASE_)
lowercase__ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = [self.start]
lowercase__ : int = False
def lowercase__ ( self):
'''simple docstring'''
while self.node_queue:
lowercase__ : List[Any] = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
lowercase__ : Union[str, Any] = True
return self.retrace_path(SCREAMING_SNAKE_CASE_)
lowercase__ : str = self.get_successors(SCREAMING_SNAKE_CASE_)
for node in successors:
self.node_queue.append(SCREAMING_SNAKE_CASE_)
if not self.reached:
return [self.start.pos]
return None
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[int] = []
for action in delta:
lowercase__ : Optional[Any] = parent.pos_x + action[1]
lowercase__ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.target.pos_y , self.target.pos_x , SCREAMING_SNAKE_CASE_))
return successors
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[str] = node
lowercase__ : Tuple = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
lowercase__ : Tuple = current_node.parent
path.reverse()
return path
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[int] = BreadthFirstSearch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = BreadthFirstSearch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = False
def lowercase__ ( self):
'''simple docstring'''
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowercase__ : Optional[int] = self.fwd_bfs.node_queue.pop(0)
lowercase__ : Optional[Any] = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
lowercase__ : Optional[Any] = True
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = current_bwd_node
lowercase__ : str = current_fwd_node
lowercase__ : Tuple = {
self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE_),
self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE_),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(SCREAMING_SNAKE_CASE_)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE_)
bwd_path.pop()
bwd_path.reverse()
lowercase__ : Optional[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCamelCase__ : Optional[Any] = (0, 0)
lowerCamelCase__ : Tuple = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase__ : Tuple = time.time()
lowerCamelCase__ : Union[str, Any] = BreadthFirstSearch(init, goal)
lowerCamelCase__ : List[Any] = bfs.search()
lowerCamelCase__ : str = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
lowerCamelCase__ : Tuple = time.time()
lowerCamelCase__ : Union[str, Any] = BidirectionalBreadthFirstSearch(init, goal)
lowerCamelCase__ : Union[str, Any] = bd_bfs.search()
lowerCamelCase__ : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 12 |
lowerCamelCase__ : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase__ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 12 | 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowerCamelCase__ : Dict = logging.getLogger(__name__)
class _snake_case ( UpperCAmelCase_ ):
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : int = self.layer[current_layer](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , head_mask[current_layer])
lowercase__ : List[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , UpperCAmelCase_ , )
class _snake_case ( UpperCAmelCase_ ):
def __init__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = BertEncoderWithPabee(SCREAMING_SNAKE_CASE_)
self.init_weights()
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = 0
lowercase__ : Optional[Any] = 0
lowercase__ : Optional[int] = 0
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[Any] = threshold
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = patience
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = 0
lowercase__ : Tuple = 0
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = self.inference_layers_num / self.inference_instances_num
lowercase__ : Optional[Any] = (
f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(SCREAMING_SNAKE_CASE_)
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase__ : int = input_ids.size()
elif inputs_embeds is not None:
lowercase__ : int = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase__ : Any = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase__ : Optional[int] = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_)
if token_type_ids is None:
lowercase__ : str = torch.zeros(SCREAMING_SNAKE_CASE_ , dtype=torch.long , device=SCREAMING_SNAKE_CASE_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase__ : torch.Tensor = self.get_extended_attention_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase__ , lowercase__ , lowercase__ : List[Any] = encoder_hidden_states.size()
lowercase__ : Tuple = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase__ : Tuple = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.invert_attention_mask(SCREAMING_SNAKE_CASE_)
else:
lowercase__ : Optional[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase__ : int = self.get_head_mask(SCREAMING_SNAKE_CASE_ , self.config.num_hidden_layers)
lowercase__ : str = self.embeddings(
input_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = embedding_output
if self.training:
lowercase__ : Tuple = []
for i in range(self.config.num_hidden_layers):
lowercase__ : Tuple = self.encoder.adaptive_forward(
SCREAMING_SNAKE_CASE_ , current_layer=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = self.pooler(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = output_layers[i](output_dropout(SCREAMING_SNAKE_CASE_))
res.append(SCREAMING_SNAKE_CASE_)
elif self.patience == 0: # Use all layers for inference
lowercase__ : Dict = self.encoder(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Optional[int] = self.pooler(encoder_outputs[0])
lowercase__ : Optional[Any] = [output_layers[self.config.num_hidden_layers - 1](SCREAMING_SNAKE_CASE_)]
else:
lowercase__ : Tuple = 0
lowercase__ : Dict = None
lowercase__ : Union[str, Any] = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase__ : List[Any] = self.encoder.adaptive_forward(
SCREAMING_SNAKE_CASE_ , current_layer=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = self.pooler(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = output_layers[i](SCREAMING_SNAKE_CASE_)
if regression:
lowercase__ : Any = logits.detach()
if patient_result is not None:
lowercase__ : str = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase__ : List[Any] = 0
else:
lowercase__ : Tuple = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase__ : Union[str, Any] = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(SCREAMING_SNAKE_CASE_)):
patient_counter += 1
else:
lowercase__ : Dict = 0
lowercase__ : List[str] = logits
if patient_counter == self.patience:
break
lowercase__ : Union[str, Any] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , UpperCAmelCase_ , )
class _snake_case ( UpperCAmelCase_ ):
def __init__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = config.num_labels
lowercase__ : str = BertModelWithPabee(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = nn.Dropout(config.hidden_dropout_prob)
lowercase__ : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ):
'''simple docstring'''
lowercase__ : Any = self.bert(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowercase__ : Optional[Any] = (logits[-1],)
if labels is not None:
lowercase__ : List[str] = None
lowercase__ : Union[str, Any] = 0
for ix, logits_item in enumerate(SCREAMING_SNAKE_CASE_):
if self.num_labels == 1:
# We are doing regression
lowercase__ : int = MSELoss()
lowercase__ : List[str] = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
lowercase__ : Union[str, Any] = CrossEntropyLoss()
lowercase__ : Dict = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
lowercase__ : Optional[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowercase__ : List[Any] = (total_loss / total_weights,) + outputs
return outputs
| 12 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ):
'''simple docstring'''
lowercase__ : str = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : Union[str, Any] = is_training
lowercase__ : Any = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[int] = vocab_size
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Any = rotary_dim
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Tuple = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : Optional[int] = initializer_range
lowercase__ : Optional[int] = None
lowercase__ : str = vocab_size - 1
lowercase__ : Any = vocab_size - 1
lowercase__ : Dict = vocab_size - 1
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ : Any = None
if self.use_input_mask:
lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ : List[Any] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs
lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = 20
lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase__ : Tuple = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
lowercase__ : List[str] = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase__ : str = model(
input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Union[str, Any] = 20
lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
lowercase__ : Any = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase__ : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
@require_flax
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = FlaxGPTJModelTester(self)
def lowercase__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@tooslow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""")
lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""")
lowercase__ : Optional[Any] = False
lowercase__ : List[str] = model.config.eos_token_id
lowercase__ : List[Any] = jax.jit(model.generate)
lowercase__ : Tuple = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences
lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@is_pt_flax_cross_test
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape
lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : str = 0
lowercase__ : List[Any] = 1
lowercase__ : Dict = 0
lowercase__ : Any = 1
lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval()
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa)
lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = fx_state
with torch.no_grad():
lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple()
lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@is_pt_flax_cross_test
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval()
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa)
lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params)
lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape
lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Tuple = 0
lowercase__ : int = 1
lowercase__ : str = 0
lowercase__ : str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple()
lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_)
with torch.no_grad():
lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@tooslow
def lowercase__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""")
lowercase__ : int = model(np.ones((1, 1)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
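# Hedged sketch of the PT<->Flax round-trip the tests above rely on (assumption:
# the two helpers imported at the top of this file keep these signatures):
#     fx_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
#     pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
# Outputs from both frameworks are then compared element-wise with a 4e-2 tolerance.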
| 12 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 12 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Any = ['image_processor', 'tokenizer']
__lowerCAmelCase : Union[str, Any] = 'AutoImageProcessor'
__lowerCAmelCase : int = 'AutoTokenizer'
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = self.image_processor
def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
lowercase__ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
if images is not None:
lowercase__ : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
if text is not None and images is not None:
lowercase__ : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_) , tensor_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
@property
def lowercase__ ( self):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 12 | 1 |
def UpperCamelCase ( lowercase_ ) -> List[Any]:
'''simple docstring'''
lowercase__ : Any = len(lowercase_ )
for i in range(length - 1 ):
lowercase__ : List[str] = i
for k in range(i + 1 , lowercase_ ):
if collection[k] < collection[least]:
lowercase__ : Any = k
if least != i:
lowercase__ , lowercase__ : Dict = (collection[i], collection[least])
return collection
if __name__ == "__main__":
lowerCamelCase__ : int = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
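# A hedged, standalone variant (names are illustrative): selection sort performs
# O(n^2) comparisons but at most n - 1 swaps, which the counter makes visible.
def selection_sort_with_swap_count(items: list) -> tuple[list, int]:
    swaps = 0
    for i in range(len(items) - 1):
        least = i
        for k in range(i + 1, len(items)):
            if items[k] < items[least]:
                least = k
        if least != i:
            items[i], items[least] = items[least], items[i]
            swaps += 1
    return items, swaps

assert selection_sort_with_swap_count([5, 3, 1, 4, 2]) == ([1, 2, 3, 4, 5], 3)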
| 12 |
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
if n == 1 or not isinstance(lowercase_ , lowercase_ ):
return 0
elif n == 2:
return 1
else:
lowercase__ : List[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
lowercase__ : Optional[Any] = 0
lowercase__ : Dict = 2
while digits < n:
index += 1
lowercase__ : str = len(str(fibonacci(lowercase_ ) ) )
return index
def UpperCamelCase ( lowercase_ = 10_00 ) -> int:
'''simple docstring'''
return fibonacci_digits_index(lowercase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
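# A hedged iterative cross-check (the function name is illustrative): index of
# the first Fibonacci number with n digits, without rebuilding the sequence
# from scratch on every step.
def first_fib_index_with_digits(n: int) -> int:
    a, b, index = 0, 1, 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_index_with_digits(3) == 12  # F(12) == 144
assert first_fib_index_with_digits(1000) == 4782  # agrees with solution(1000)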
| 12 | 1 |