| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string, 81–54k chars | int64, 0–721 | string, 91–41.9k chars | int64, 0–699 | int64, 0–1 |
'''simple docstring'''
import math


def jump_search(arr: list, x: int) -> int:
    """Find the index of x in the sorted list arr, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(f'Number {x} is at index {res}')
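Jump search assumes a sorted input list; a quick sanity check of the function above with an illustrative input:

```python
sorted_arr = [0, 1, 3, 5, 8, 13, 21, 34]
assert jump_search(sorted_arr, 13) == 5   # 13 sits at index 5
assert jump_search(sorted_arr, 4) == -1   # absent values return -1
```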
'''simple docstring'''
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo')

        image = dataset['train'][0]['image'].convert('RGB')

        inputs = image_processor(image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
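Outside the test harness, the same checkpoint can be exercised through the high-level `pipeline` API; a sketch (the image path is a placeholder, and downloading the checkpoint needs network access):

```python
from transformers import pipeline

# Document-image classification with the DiT checkpoint used in the test above.
classifier = pipeline('image-classification', model='microsoft/dit-base-finetuned-rvlcdip')
predictions = classifier('scanned_page.png')  # placeholder path to a document image
print(predictions[0])  # top RVL-CDIP label with its score
```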
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar('T')


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # the value stored in this node
        self.next: Node[T] | None = None  # reference to the node below

    def __str__(self) -> str:
        return f'{self.data}'


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return '->'.join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
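A brief usage sketch of the stack defined above:

```python
stack = Stack[int]()
for value in (1, 2, 3):
    stack.push(value)
assert str(stack) == '3->2->1'  # most recently pushed item is on top
assert stack.peek() == 3
assert stack.pop() == 3
assert len(stack) == 2
```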
'''simple docstring'''
import argparse
import struct
import unittest


class SHA256:
    """Pure-Python reference implementation of the SHA-256 hash."""

    def __init__(self, data: bytes):
        self.data = data
        # Initialize hash values
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b'\x80' + (b'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes('Test String', 'utf-8')
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s',
        '--string',
        dest='input_string',
        default='Hello World!! Welcome to Cryptography',
        help='Hash the string',
    )
    parser.add_argument(
        '-f', '--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
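A standalone cross-check of the class above against `hashlib`, using a digest that is easy to verify independently:

```python
import hashlib

message = b'abc'
assert SHA256(message).hash == hashlib.sha256(message).hexdigest()
print(SHA256(message).hash)
# ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
```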
'''simple docstring'''
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Explore every branch of the permutation state space tree via DFS."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
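The routine enumerates the same orderings as `itertools.permutations`; a minimal cross-check on the count:

```python
from itertools import permutations

# 3 distinct items have 3! = 6 permutations; the DFS above prints each once.
assert len(list(permutations(['A', 'B', 'C']))) == 6
```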
'''simple docstring'''
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a pn junction diode in volts."""
    if donor_conc <= 0:
        raise ValueError('Donor concentration should be positive')
    elif acceptor_conc <= 0:
        raise ValueError('Acceptor concentration should be positive')
    elif intrinsic_conc <= 0:
        raise ValueError('Intrinsic concentration should be positive')
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            'Donor concentration should be greater than intrinsic concentration')
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            'Acceptor concentration should be greater than intrinsic concentration')
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
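An illustrative call; the concentrations (in cm^-3) are typical textbook values for a silicon pn junction, where the intrinsic concentration is roughly 1e10 cm^-3:

```python
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
print(f'{v_bi:.3f} V')  # roughly 0.83 V at T = 300 K
```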
'''simple docstring'''
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f'{solution() = }')
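The Project Euler statement's own example doubles as a test; 13195 factors into 5 × 7 × 13 × 29:

```python
assert solution(13195) == 29
assert solution(600_851_475_143) == 6857  # the answer to the actual problem
```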
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info('Tuning chunk size...')

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
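A minimal sketch of how `chunk_layer` is meant to be driven, assuming the module above is importable (its relative imports tie it to its parent package); the toy layer and shapes are invented for illustration:

```python
import torch

# Toy layer that doubles its input. chunk_layer flattens the (2, 8) batch
# dims to 16 rows, applies the layer in chunks of 4, and reassembles.
def toy_layer(x: torch.Tensor) -> torch.Tensor:
    return x * 2

inputs = {'x': torch.randn(2, 8, 3)}  # two batch dims: (2, 8)
out = chunk_layer(toy_layer, inputs, chunk_size=4, no_batch_dims=2)
assert torch.allclose(out, toy_layer(inputs['x']))
```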
'''simple docstring'''
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type='np',
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type='np',
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe', safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        image = sd_pipe([prompt], num_inference_steps=2, output_type='np').images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'padme amidala taking a bath artwork, safe for work, no nudity'
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
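For reference, a hedged sketch of driving the safe pipeline directly with the same safety-guidance hyperparameters the tests exercise; the prompt is a placeholder, and a GPU plus model download are assumed:

```python
pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
pipe = pipe.to('cuda')
result = pipe(
    'an illustrative prompt',  # placeholder prompt
    num_inference_steps=50,
    sld_guidance_scale=2000,   # safety-guidance settings taken from the tests above
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
)
result.images[0].save('output.png')
```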
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
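Beyond the synthetic fixtures above, everyday usage pairs a real checkpoint with text and images; a sketch assuming network access (the blank image is a stand-in for a real photo):

```python
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
image = Image.new('RGB', (224, 224))  # placeholder image
inputs = processor(text=['a diagram', 'a photo'], images=image, return_tensors='pt')
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```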
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ('DownEncoderBlock2D',),
        up_block_types: Tuple[str] = ('UpDecoderBlock2D',),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = 'silu',
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = 'group',
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == 'spatial' else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
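A minimal round-trip sketch with a deliberately tiny, untrained configuration; the hyperparameters are illustrative only:

```python
import torch

model = VQModel(block_out_channels=(32,), num_vq_embeddings=64)
sample = torch.randn(1, 3, 32, 32)
reconstruction = model(sample).sample
assert reconstruction.shape == sample.shape
```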
'''simple docstring'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = 'cpu', save_path: Union[str, None] = None) -> None:
    """Convert every tensor in a saved state dict to fp16, overwriting src_path unless save_path is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
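A usage sketch with placeholder paths; `fire` also exposes the same signature from a shell, e.g. `python convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin` (script name assumed):

```python
# Halve a checkpoint and write it next to the original (placeholder paths).
convert('pytorch_model.bin', map_location='cpu', save_path='pytorch_model.fp16.bin')
```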
'''simple docstring'''
from typing import Any


def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
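Quick checks of the function above; ties return every maximal value, sorted:

```python
assert mode([1, 2, 2, 3]) == [2]
assert mode([1, 1, 2, 2]) == [1, 2]
assert mode([]) == []
```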
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'nvidia/segformer-b0-finetuned-ade-512-512': (
        'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
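Instantiating the default configuration is enough to sanity-check the values above:

```python
config = SegformerConfig()
assert config.model_type == 'segformer'
assert config.num_encoder_blocks == 4
assert config.hidden_sizes == [32, 64, 160, 256]
```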
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    # Memory-function (memoized) knapsack; ``f`` is a global DP table
    # pre-filled with -1 for states that have not been computed yet.
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            new_val = mf_knapsack(i - 1, wt, val, j)
        else:
            new_val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = new_val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'But got {num_items} weights and {len(val)} values'
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f'type {type(wt[i])} at index {i}'
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
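One more hand-checkable instance of the helper above:

```python
# Capacity 5 with items (weight, value) = (2, 3) and (3, 4): both fit exactly,
# so the optimum is 7 using items {1, 2} (1-indexed).
opt_value, subset = knapsack_with_example_solution(5, [2, 3], [3, 4])
assert opt_value == 7
assert subset == {1, 2}
```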
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character n-grams of the given size from a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
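Two quick checks of the sliding-window behaviour:

```python
assert create_ngram('abcde', 3) == ['abc', 'bcd', 'cde']
assert create_ngram('ab', 5) == []  # window longer than the string yields nothing
```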
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token='<s>',
        eos_token='</s>',
        pad_token='<pad>',
        unk_token='<unk>',
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>') for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + ' '
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]
    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None,
        already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a)) + ([0] * len(token_ids_b)) + suffix_ones
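    # Resulting layouts for the two methods above (illustrative; <lang:tgt> is
    # the target-language prefix token installed by set_tgt_lang_special_tokens):
    #   single sequence: [<lang:tgt>] tokens            [</s>]  -> mask 1 0...0 1
    #   pair:            [<lang:tgt>] tokens_a tokens_b [</s>]  -> mask 1 0...0 1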
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, 'r') as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
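# Illustrative usage sketch (hypothetical local files for the tokenizer above):
#   tokenizer = Speech2TextTokenizer('vocab.json', 'sentencepiece.bpe.model',
#                                    lang_codes='mustc', tgt_lang='fr')
#   ids = tokenizer.build_inputs_with_special_tokens(
#       [tokenizer._convert_token_to_id(t) for t in tokenizer._tokenize('bonjour')])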
| 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)
class SVC:
    def __init__(
        self, *,
        regularization: float = np.inf, kernel: str = 'linear', gamma: float = 0.0, ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == 'linear':
            self.kernel = self.__linear
        elif kernel == 'rbf':
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear(self, vectora: ndarray, vectorb: ndarray) -> float:
        return np.dot(vectora, vectorb)
    def __rbf(self, vectora: ndarray, vectorb: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
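# Illustrative hard-margin example (comment sketch; `regularization` defaults
# to np.inf, so the default SVC is a hard-margin classifier):
#   xs = [np.asarray([0, 1]), np.asarray([0, 2]), np.asarray([3, 1]), np.asarray([3, 2])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel='linear'); svc.fit(xs, ys)
#   svc.predict(np.asarray([0, 1]))  # -> 1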
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __lowercase (unittest.TestCase ):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
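# The rule these cases exercise: is_safetensors_compatible(filenames, variant=...)
# returns True only when every PyTorch weight file (*.bin, with the variant
# infix when one is given) has a .safetensors counterpart, so the whole
# pipeline can be loaded without pickle files.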
| 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
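# Illustrative invocation (the script filename is hypothetical):
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large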
| 6 | 1 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
    # Boolean flag values below follow the upstream Donut conversion script.
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
            model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True, )
    return encoder_config, decoder_config
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
if "encoder.model" in name:
UpperCamelCase__ : Any = name.replace('encoder.model' , 'encoder')
if "decoder.model" in name:
UpperCamelCase__ : str = name.replace('decoder.model' , 'decoder')
if "patch_embed.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection')
if "patch_embed.norm" in name:
UpperCamelCase__ : int = name.replace('patch_embed.norm' , 'embeddings.norm')
if name.startswith('encoder'):
if "layers" in name:
UpperCamelCase__ : int = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "attn" in name and "mask" not in name:
UpperCamelCase__ : Any = name.replace('attn' , 'attention.self')
if "norm1" in name:
UpperCamelCase__ : Tuple = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('norm2' , 'layernorm_after')
if "mlp.fc1" in name:
UpperCamelCase__ : List[str] = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('mlp.fc2' , 'output.dense')
if name == "encoder.norm.weight":
UpperCamelCase__ : Optional[int] = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
UpperCamelCase__ : Tuple = 'encoder.layernorm.bias'
return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> str:
for key in orig_state_dict.copy().keys():
UpperCamelCase__ : List[str] = orig_state_dict.pop(lowerCamelCase_)
if "qkv" in key:
UpperCamelCase__ : List[Any] = key.split('.')
UpperCamelCase__ : Optional[Any] = int(key_split[3])
UpperCamelCase__ : Any = int(key_split[5])
UpperCamelCase__ : List[str] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ : Any = val[:dim, :]
UpperCamelCase__ : List[Any] = val[dim : dim * 2, :]
UpperCamelCase__ : Optional[Any] = val[-dim:, :]
else:
UpperCamelCase__ : Dict = val[:dim]
UpperCamelCase__ : List[str] = val[dim : dim * 2]
UpperCamelCase__ : str = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
UpperCamelCase__ : Tuple = val
return orig_state_dict
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False) -> Dict:
# load original model
UpperCamelCase__ : str = DonutModel.from_pretrained(lowerCamelCase_).eval()
# load HuggingFace model
UpperCamelCase__, UpperCamelCase__ : Optional[Any] = get_configs(lowerCamelCase_)
UpperCamelCase__ : int = DonutSwinModel(lowerCamelCase_)
UpperCamelCase__ : str = MBartForCausalLM(lowerCamelCase_)
UpperCamelCase__ : List[Any] = VisionEncoderDecoderModel(encoder=lowerCamelCase_ , decoder=lowerCamelCase_)
model.eval()
UpperCamelCase__ : List[str] = original_model.state_dict()
UpperCamelCase__ : str = convert_state_dict(lowerCamelCase_ , lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
# verify results on scanned document
UpperCamelCase__ : Dict = load_dataset('hf-internal-testing/example-documents')
UpperCamelCase__ : int = dataset['test'][0]['image'].convert('RGB')
UpperCamelCase__ : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase_ , from_slow=lowerCamelCase_)
UpperCamelCase__ : int = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1])
UpperCamelCase__ : Any = DonutProcessor(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = processor(lowerCamelCase_ , return_tensors='pt').pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
UpperCamelCase__ : int = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
UpperCamelCase__ : List[str] = 'When is the coffee break?'
UpperCamelCase__ : Optional[int] = task_prompt.replace('{user_input}' , lowerCamelCase_)
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
UpperCamelCase__ : Any = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
UpperCamelCase__ : Dict = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        UpperCamelCase__ : Optional[int] = '<s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
UpperCamelCase__ : Optional[int] = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
UpperCamelCase__ : Union[str, Any] = 'hello world'
else:
raise ValueError('Model name not supported')
UpperCamelCase__ : Optional[int] = original_model.decoder.tokenizer(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors='pt')[
'input_ids'
]
UpperCamelCase__ : Union[str, Any] = original_model.encoder.model.patch_embed(lowerCamelCase_)
UpperCamelCase__, UpperCamelCase__ : Any = model.encoder.embeddings(lowerCamelCase_)
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3)
# verify encoder hidden states
UpperCamelCase__ : int = original_model.encoder(lowerCamelCase_)
UpperCamelCase__ : Tuple = model.encoder(lowerCamelCase_).last_hidden_state
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-2)
# verify decoder hidden states
UpperCamelCase__ : Optional[int] = original_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_).logits
UpperCamelCase__ : Optional[int] = model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_).logits
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3)
print('Looks ok!')
if pytorch_dump_folder_path is not None:
print(f'Saving model and processor to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/')[-1] , commit_message='Update model')
processor.push_to_hub('nielsr/' + model_name.split('/')[-1] , commit_message='Update model')
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
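# Illustrative invocation (the script filename is hypothetical):
#   python convert_donut_checkpoint.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa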
| 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act='gelu',
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3,
        mask_ratio=0.6, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
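        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, and with
        # mask_ratio 0.6 the expected length is ceil(0.4 * (225 + 1)) = 91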
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViTMAE does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict['noise'] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
    def test_inference_logits(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 6 | 1 |
'''simple docstring'''
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is a path from source `s` to sink `t` in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
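# For reference, the run above reports the saturated edges of the minimum cut
# for this capacity matrix: [(1, 3), (4, 3), (4, 5)], whose total capacity
# 12 + 7 + 4 = 23 matches the maximum flow of this classic example graph.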
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=DummyObject ):
    _backends = ['torch', 'scipy']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
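# With DummyObject as the metaclass, any attribute access on this placeholder
# raises a helpful error via requires_backends until both torch and scipy are
# installed; the real pipeline class is used when the backends are available.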
| 6 | 1 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    # attribute paths follow the upstream distillation script
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.')
parser.add_argument(
'--dump_path' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory (log, checkpoints, parameters, etc.)')
parser.add_argument(
'--data_file' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=lowerCamelCase_ , choices=['distilbert', 'roberta', 'gpt2'] , required=lowerCamelCase_ , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to the student configuration.')
parser.add_argument(
'--student_pretrained_weights' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Load student initialization checkpoint.')
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=lowerCamelCase_ , help='Teacher type (BERT, RoBERTa).')
parser.add_argument('--teacher_name' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The teacher model.')
parser.add_argument('--temperature' , default=2.0 , type=lowerCamelCase_ , help='Temperature for the softmax temperature.')
parser.add_argument(
'--alpha_ce' , default=0.5 , type=lowerCamelCase_ , help='Linear weight for the distillation loss. Must be >=0.')
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=lowerCamelCase_ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=lowerCamelCase_ , help='Linear weight for the CLM loss. Must be >=0.')
parser.add_argument('--alpha_mse' , default=0.0 , type=lowerCamelCase_ , help='Linear weight of the MSE loss. Must be >=0.')
parser.add_argument(
'--alpha_cos' , default=0.0 , type=lowerCamelCase_ , help='Linear weight of the cosine embedding loss. Must be >=0.')
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.')
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=lowerCamelCase_ , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=lowerCamelCase_ , help='Proportion of tokens to mask out.')
parser.add_argument('--word_keep' , default=0.1 , type=lowerCamelCase_ , help='Proportion of tokens to keep.')
parser.add_argument('--word_rand' , default=0.1 , type=lowerCamelCase_ , help='Proportion of tokens to randomly replace.')
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=lowerCamelCase_ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=lowerCamelCase_ , help='The token counts in the data_file for MLM.')
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=lowerCamelCase_ , default=3 , help='Number of pass on the whole dataset.')
parser.add_argument('--batch_size' , type=lowerCamelCase_ , default=5 , help='Batch size (for each process).')
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=lowerCamelCase_ , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=lowerCamelCase_ , help='Linear warmup proportion.')
parser.add_argument('--weight_decay' , default=0.0 , type=lowerCamelCase_ , help='Weight decay if we apply some.')
parser.add_argument('--learning_rate' , default=5e-4 , type=lowerCamelCase_ , help='The initial learning rate for Adam.')
parser.add_argument('--adam_epsilon' , default=1e-6 , type=lowerCamelCase_ , help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm' , default=5.0 , type=lowerCamelCase_ , help='Max gradient norm.')
parser.add_argument('--initializer_range' , default=0.02 , type=lowerCamelCase_ , help='Random initialization range.')
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCamelCase_ , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=lowerCamelCase_ , default=1 , help='Number of GPUs in the node.')
parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='Distributed training - Local rank')
parser.add_argument('--seed' , type=lowerCamelCase_ , default=56 , help='Random seed')
parser.add_argument('--log_interval' , type=lowerCamelCase_ , default=500 , help='Tensorboard logging interval.')
parser.add_argument('--checkpoint_interval' , type=lowerCamelCase_ , default=4_000 , help='Checkpoint interval.')
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
init_gpu_params(lowerCamelCase_)
set_seed(lowerCamelCase_)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
                raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to'
                    ' overwrite it. Use `--force` if you want to overwrite it.')
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(f'Experiment will be dumped and logged in {args.dump_path}')
# SAVE PARAMS #
logger.info(f'Param: {args}')
with open(os.path.join(args.dump_path , 'parameters.json') , 'w') as f:
json.dump(vars(lowerCamelCase_) , lowerCamelCase_ , indent=4)
git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'Special tokens {special_tok_ids}')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file , 'rb') as fp:
UpperCamelCase__ : Optional[int] = pickle.load(lowerCamelCase_)
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)')
with open(args.token_counts , 'rb') as fp:
UpperCamelCase__ : List[Any] = pickle.load(lowerCamelCase_)
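        # smooth the counts word2vec-style: counts ** -mlm_smoothing boosts the
        # masking probability of rare tokens relative to frequent ones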
UpperCamelCase__ : List[Any] = np.maximum(lowerCamelCase_ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCamelCase__ : str = 0.0 # do not predict special tokens
UpperCamelCase__ : Tuple = torch.from_numpy(lowerCamelCase_)
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : Union[str, Any] = LmSeqsDataset(params=lowerCamelCase_ , data=lowerCamelCase_)
logger.info('Data loader created.')
# STUDENT #
logger.info(f'Loading student config from {args.student_config}')
UpperCamelCase__ : int = student_config_class.from_pretrained(args.student_config)
UpperCamelCase__ : Optional[Any] = True
if args.student_pretrained_weights is not None:
logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}')
UpperCamelCase__ : Dict = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCamelCase_)
else:
UpperCamelCase__ : str = student_model_class(lowerCamelCase_)
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}')
logger.info('Student loaded.')
# TEACHER #
UpperCamelCase__ : Union[str, Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCamelCase_)
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}')
logger.info(f'Teacher loaded from {args.teacher_name}.')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCamelCase_ , lowerCamelCase_)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCamelCase_ , lowerCamelCase_)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCamelCase__ : Dict = Distiller(
params=lowerCamelCase_ , dataset=lowerCamelCase_ , token_probs=lowerCamelCase_ , student=lowerCamelCase_ , teacher=lowerCamelCase_)
distiller.train()
logger.info('Let\'s go get some drinks.')
if __name__ == "__main__":
main()
| 6 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
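        # returns (common prefix, remaining node prefix, remaining word); e.g. a node
        # with prefix "band" matched against "banana" yields ("ban", "d", "ana")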
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
            # Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
| 6 | 1 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCamelCase__ : Optional[Any] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase_)
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase_ , output_loading_info=lowerCamelCase_)
else:
UpperCamelCase__ : Union[str, Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase_)
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase_ , output_loading_info=lowerCamelCase_)
UpperCamelCase__ : str = ['key_proj', 'value_proj', 'query_proj']
UpperCamelCase__ : Dict = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
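    # maps attribute names from the new HF architecture (keys) to their
    # counterparts in the old checkpoint's module tree (values), consulted while
    # walking each missing key below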
for key in loading_info["missing_keys"]:
UpperCamelCase__ : Dict = key.split('.')
if attributes[0] == "lm_head":
UpperCamelCase__ : int = prophet
UpperCamelCase__ : Any = prophet_old
else:
UpperCamelCase__ : int = prophet.prophetnet
UpperCamelCase__ : Tuple = prophet_old.model
UpperCamelCase__ : str = False
for attribute in attributes:
if attribute in mapping:
UpperCamelCase__ : List[Any] = mapping[attribute]
if not hasattr(lowerCamelCase_ , lowerCamelCase_) and len(lowerCamelCase_) > 0:
UpperCamelCase__ : List[Any] = attribute
elif hasattr(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : int = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCamelCase__ : Any = old_model.weight
logger.info(f'{attribute} is initialized.')
UpperCamelCase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCamelCase__ : str = old_model.bias
logger.info(f'{attribute} is initialized')
UpperCamelCase__ : List[str] = True
break
elif attribute in special_keys and hasattr(lowerCamelCase_ , 'in_proj_weight'):
UpperCamelCase__ : Dict = old_model.in_proj_weight.shape[0] // 3
UpperCamelCase__ : Optional[int] = getattr(lowerCamelCase_ , lowerCamelCase_)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCamelCase__ : Any = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
UpperCamelCase__ : Dict = nn.Parameter(old_model.in_proj_bias[:embed_dim])
elif attribute == "key_proj":
UpperCamelCase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
UpperCamelCase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
elif attribute == "value_proj":
UpperCamelCase__ : Any = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
UpperCamelCase__ : List[str] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
UpperCamelCase__ : List[str] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCamelCase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :])
UpperCamelCase__ : Dict = True
break
if attribute.isdigit():
UpperCamelCase__ : Union[str, Any] = model[int(lowerCamelCase_)]
UpperCamelCase__ : List[Any] = old_model[int(lowerCamelCase_)]
else:
UpperCamelCase__ : Optional[int] = getattr(lowerCamelCase_ , lowerCamelCase_)
if old_attribute == "":
UpperCamelCase__ : List[Any] = old_model
else:
if not hasattr(lowerCamelCase_ , lowerCamelCase_):
raise ValueError(f'{old_model} does not have {old_attribute}')
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!')
print(f'Saving model to {pytorch_dump_folder_path}')
prophet.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 6 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
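        # unlike the previous test, these classifier-free sampling embeddings are
        # learned, hence the explicit hidden size and sequence length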
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> bool:
UpperCamelCase__ : Dict = len(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
UpperCamelCase__ : Dict = [[False for _ in range(m + 1)] for _ in range(n + 1)]
UpperCamelCase__ : List[Any] = True
for i in range(lowerCamelCase_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
UpperCamelCase__ : Tuple = True
if a[i].islower():
UpperCamelCase__ : Any = True
return dp[n][m]
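# Illustrative behaviour of the matcher above (hand-checked, not from the source):
#   "daBcd" vs "ABC" -> True  (capitalise 'a' and 'c', drop both lowercase 'd's)
#   "dBcd"  vs "ABC" -> False ('B' can never be turned into 'A')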
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : str = 0
# compute the shape of the output matrix
UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[int] = 0
return updated_arr
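# Illustrative example (hand-checked): for a 4x4 input with size=2 and stride=2,
#   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
# returns [[6., 8.], [14., 16.]] -- the maximum of each non-overlapping 2x2 block.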
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : Tuple = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : List[Any] = 0
# compute the shape of the output matrix
UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[Any] = 0
return updated_arr
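# Illustrative example (hand-checked): the same 4x4 input with size=2 and stride=2
# gives [[3., 5.], [11., 13.]] -- each 2x2 block mean is truncated to an int by
# int(np.average(...)) before landing in the float output array.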
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
lowerCAmelCase__ = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''BlipImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
# add QFormer tokenizer
UpperCamelCase__ : Tuple = qformer_tokenizer
def __call__( self : List[Any] , UpperCAmelCase_ : ImageInput = None , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : List[str] , ):
if images is None and text is None:
raise ValueError('You have to specify at least images or text.')
UpperCamelCase__ : Tuple = BatchFeature()
if text is not None:
UpperCamelCase__ : List[str] = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
encoding.update(UpperCAmelCase_)
UpperCamelCase__ : int = self.qformer_tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = qformer_text_encoding.pop('input_ids')
UpperCamelCase__ : Any = qformer_text_encoding.pop('attention_mask')
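        # the popped ids/mask are meant to be stored under dedicated keys
        # (qformer_input_ids / qformer_attention_mask in the reference
        # implementation) so they do not collide with the main tokenizer's outputs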
if images is not None:
UpperCamelCase__ : Optional[int] = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_)
encoding.update(UpperCAmelCase_)
return encoding
def __UpperCamelCase ( self : List[str] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any]):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any]):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : int = self.tokenizer.model_input_names
UpperCamelCase__ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]):
if os.path.isfile(UpperCAmelCase_):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = os.path.join(UpperCAmelCase_ , 'qformer_tokenizer')
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_)
return super().save_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
@classmethod
def __UpperCamelCase ( cls : Dict , UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]):
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ , subfolder='qformer_tokenizer')
UpperCamelCase__ : Union[str, Any] = cls._get_arguments_from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
args.append(UpperCAmelCase_)
return cls(*UpperCAmelCase_)
| 6 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
                    '[' + '. '.join([str(value) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
[Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
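# Illustrative usage (hand-checked): Matrix([[1, 2], [3, 4]]) has determinant -2,
# and squaring it with the __mul__ above yields the rows [[7, 10], [15, 22]].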
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> set:
UpperCamelCase__ : List[str] = set()
# edges = list of graph's edges
UpperCamelCase__ : Optional[int] = get_edges(lowerCamelCase_)
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
UpperCamelCase__, UpperCamelCase__ : List[str] = edges.pop()
chosen_vertices.add(lowerCamelCase_)
chosen_vertices.add(lowerCamelCase_)
for edge in edges.copy():
if from_node in edge or to_node in edge:
                edges.discard(edge)
return chosen_vertices
def __UpperCAmelCase ( lowerCamelCase_) -> set:
UpperCamelCase__ : Optional[int] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node))
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 6 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = inputs['prompt']
UpperCamelCase__ : List[Any] = inputs['generator']
UpperCamelCase__ : Tuple = inputs['num_inference_steps']
UpperCamelCase__ : List[Any] = inputs['output_type']
if "image" in inputs:
UpperCamelCase__ : Tuple = inputs['image']
else:
UpperCamelCase__ : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['mask_image']
else:
UpperCamelCase__ : int = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['original_image']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_)
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase__ : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = inputs['generator']
UpperCamelCase__ : List[Any] = inputs['num_inference_steps']
UpperCamelCase__ : Optional[int] = inputs['output_type']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Tuple = image
if mask_image is not None:
UpperCamelCase__ : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
| 6 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=3_0522, type=int)
lowerCAmelCase__ = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
lowerCAmelCase__ = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
lowerCAmelCase__ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase__ = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase__ = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root of p')
while True:
UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_)
if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1:
continue
if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1:
continue
return g
def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...')
UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number.
UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p.
UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety.
UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_)
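    # ElGamal key layout in this implementation: the public key carries the prime
    # p, the generator, and the modular inverse of generator**d mod p; the private
    # key keeps only the secret exponent d.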
UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p)
UpperCamelCase__ : Optional[Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def __UpperCAmelCase ( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
| 6 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
_lowerCamelCase = None
def __init__( self : str , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]="<unk>" , UpperCAmelCase_ : Union[str, Any]="<s>" , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Tuple="<pad>" , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[int]=False , **UpperCAmelCase_ : int , ):
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase_) != add_prefix_space:
UpperCamelCase__ : List[str] = getattr(UpperCAmelCase_ , pre_tok_state.pop('type'))
UpperCamelCase__ : int = add_prefix_space
UpperCamelCase__ : List[str] = pre_tok_class(**UpperCAmelCase_)
UpperCamelCase__ : List[str] = add_prefix_space
def __UpperCamelCase ( self : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
UpperCamelCase__ : int = kwargs.get('is_split_into_words' , UpperCAmelCase_)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
' pretokenized inputs.')
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict):
UpperCamelCase__ : Any = kwargs.get('is_split_into_words' , UpperCAmelCase_)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
' pretokenized inputs.')
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
UpperCamelCase__ : Any = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) + [self.eos_token_id])
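        # keep only the most recent tokens when the running conversation no longer
        # fits in the model's context window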
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
| 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
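# these keys live at the top level of the HF model rather than under the
# `unispeech.` prefix, so they are not re-prefixed during conversion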
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
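# Example invocation (illustrative placeholders only -- the script name and paths
# below are assumptions, not real files):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --dict_path ./dict.ltr.txt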
| 6 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
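# In practice this module is reached through the `accelerate` CLI entry point, e.g.:
#   accelerate config --config_file ./default_config.yaml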
| 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModel(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
    def test_stable_diffusion_fp16(self):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 6 | 1 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
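# For instance, bytes_to_unicode()[ord('!')] == '!' (printable bytes map to themselves),
# while non-printable bytes such as 0 are remapped to unused code points above 255.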
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
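# e.g. get_pairs(('h', 'e', 'l', 'l', 'o')) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}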
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
return len(self.encoder)
    def get_vocab(self):
return dict(self.encoder , **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
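# Minimal usage sketch (illustrative; 'facebook/blenderbot-3B' is the checkpoint the
# tables above point at, and loading it requires network access):
#   tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
#   ids = tokenizer('Sam is a great name.')['input_ids']
#   text = tokenizer.decode(ids)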
| 6 | 1 |
'''simple docstring'''
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'{head} -> {tail} == {weight}\n'
        return string.rstrip('\n')

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
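# Minimal usage sketch (illustrative toy graph, not part of the original module):
if __name__ == "__main__":
    demo = Graph.build(
        vertices=[0, 1, 2, 3],
        edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)],
    )
    # Expected MST: edges (0, 1), (0, 2) and (2, 3), total weight 6
    print(Graph.boruvka_mst(demo))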
| 6 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
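# Caveat: this scrapes Yahoo Finance's HTML, so the CSS class above is brittle and may
# change at any time; if the markup changes, `soup.find` returns None and the lookup
# above raises an AttributeError.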
| 6 | 1 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
    'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-config')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config')
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub('test-config', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)
            new_config = BertConfig.from_pretrained(F'{USER}/test-config')
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token)
            new_config = BertConfig.from_pretrained('valid_org/test-config-org')
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})
        new_config = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}')
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F' {", ".join(keys_with_defaults)}.')
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This checks we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json')
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 6 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
@slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset('nielsr/rvlcdip-demo')
        image = dataset['train'][0]['image'].convert('RGB')
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 6 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False, ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False, ):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Optional[Any]:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Dict = total_intersect_and_union(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# compute metrics
UpperCamelCase__ : Optional[int] = {}
UpperCamelCase__ : List[Any] = total_area_intersect.sum() / total_area_label.sum()
UpperCamelCase__ : Tuple = total_area_intersect / total_area_union
UpperCamelCase__ : str = total_area_intersect / total_area_label
UpperCamelCase__ : Union[str, Any] = np.nanmean(lowerCamelCase_)
UpperCamelCase__ : List[Any] = np.nanmean(lowerCamelCase_)
UpperCamelCase__ : Any = all_acc
UpperCamelCase__ : Optional[int] = iou
UpperCamelCase__ : Union[str, Any] = acc
if nan_to_num is not None:
        UpperCamelCase__ : Optional[int] = {metric: np.nan_to_num(metric_value , nan=lowerCamelCase_) for metric, metric_value in metrics.items()}
return metrics
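# Illustrative sanity check of the bookkeeping above (not part of the metric
# API; run separately). With 3 labels, the bins/range follow the same
# convention as intersect_and_union:
#
#   >>> import numpy as np
#   >>> pred = np.array([0, 1, 1, 2]); gt = np.array([0, 1, 2, 2])
#   >>> inter = np.histogram(pred[pred == gt], bins=3, range=(0, 2))[0]
#   >>> union = (np.histogram(pred, bins=3, range=(0, 2))[0]
#   ...          + np.histogram(gt, bins=3, range=(0, 2))[0] - inter)
#   >>> (inter / union).tolist()
#   [1.0, 0.5, 0.5]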
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
def __UpperCamelCase ( self : Optional[int]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
}) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ):
UpperCamelCase__ : Tuple = mean_iou(
results=UpperCAmelCase_ , gt_seg_maps=UpperCAmelCase_ , num_labels=UpperCAmelCase_ , ignore_index=UpperCAmelCase_ , nan_to_num=UpperCAmelCase_ , label_map=UpperCAmelCase_ , reduce_labels=UpperCAmelCase_ , )
return iou_result
| 6 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
# add 48 0-ed integers
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
        self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.sha256(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
main()
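# Standalone check of the padding rule used in `preprocessing` above
# (illustrative, independent of the class): SHA-256 pads every message with
# 0x80, zero bytes, and an 8-byte big-endian bit length, so the padded length
# is always a multiple of the 64-byte block size:
#
#   >>> all((n + 1 + (63 - (n + 8) % 64) + 8) % 64 == 0 for n in range(200))
#   True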
| 6 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCAmelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=100 , lowerCamelCase_=" ") -> List[str]:
UpperCamelCase__ : Optional[int] = text.split(lowerCamelCase_)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(lowerCamelCase_) , lowerCamelCase_)]
def __UpperCAmelCase ( lowerCamelCase_) -> dict:
UpperCamelCase__, UpperCamelCase__ : Any = [], []
for title, text in zip(documents['title'] , documents['text']):
if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '')
                texts.append(passage)
return {"title": titles, "text": texts}
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> dict:
UpperCamelCase__ : str = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=lowerCamelCase_ , padding='longest' , return_tensors='pt')['input_ids']
UpperCamelCase__ : Dict = ctx_encoder(input_ids.to(device=lowerCamelCase_) , return_dict=lowerCamelCase_).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> int:
######################################
logger.info('Step 1 - Create the dataset')
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCamelCase__ : Union[str, Any] = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCamelCase__ : str = dataset.map(lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=processing_args.num_proc)
# And compute the embeddings
UpperCamelCase__ : Optional[int] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=lowerCamelCase_)
UpperCamelCase__ : str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
UpperCamelCase__ : str = Features(
{'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))}) # optional, save as float32 instead of float64 to save space
UpperCamelCase__ : List[Any] = dataset.map(
partial(lowerCamelCase_ , ctx_encoder=lowerCamelCase_ , ctx_tokenizer=lowerCamelCase_) , batched=lowerCamelCase_ , batch_size=processing_args.batch_size , features=lowerCamelCase_ , )
# And finally save your dataset
UpperCamelCase__ : Optional[int] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset')
dataset.save_to_disk(lowerCamelCase_)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset')
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCamelCase__ : List[Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index('embeddings' , custom_index=lowerCamelCase_)
# And save the index
UpperCamelCase__ : int = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss')
dataset.get_index('embeddings').save(lowerCamelCase_)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
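# Minimal sketch of the Faiss HNSW pattern used above, on toy data (the
# dimension and M values mirror this script's defaults, d=768 and m=128):
#
#   >>> import numpy as np
#   >>> index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT)
#   >>> index.add(np.random.rand(100, 768).astype("float32"))
#   >>> scores, ids = index.search(np.random.rand(1, 768).astype("float32"), 5)
#   >>> ids.shape
#   (1, 5)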
@dataclass
class __lowercase :
_lowerCamelCase = field(
default=str(Path(__lowerCamelCase ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
_lowerCamelCase = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
_lowerCamelCase = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
_lowerCamelCase = field(
default=str(Path(__lowerCamelCase ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class __lowercase :
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
_lowerCamelCase = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class __lowercase :
_lowerCamelCase = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
_lowerCamelCase = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCAmelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 6 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
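# Worked example (illustrative numbers for silicon at T = 300 K): with
# N_D = N_A = 1e17 and n_i = 1.5e10 (per cm^3),
#   V_bi = (k_B * T / q) * ln(N_D * N_A / n_i**2)
#        ~ 0.0259 V * ln(4.44e13) ~ 0.0259 * 31.4 ~ 0.81 V
# which is the value the function above returns for those inputs.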
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowerCAmelCase__ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowerCAmelCase__ = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but one that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
lowerCAmelCase__ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
def __UpperCamelCase ( self : Tuple):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float'),
'references': datasets.Value('float'),
}) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=False):
if return_pvalue:
UpperCamelCase__ : Dict = pearsonr(UpperCAmelCase_ , UpperCAmelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase_ , UpperCAmelCase_)[0])}
| 6 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
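# Illustrative check: this is row-major index unraveling, e.g. for
# dims = (2, 3) a flat index of 5 maps to (1, 2), matching
# numpy.unravel_index(5, (2, 3)).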
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase_) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
    def _prep_inputs(t) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
        UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
            UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
    UpperCamelCase__ : int = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
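# Minimal sketch of the idea implemented above (hypothetical helper; single
# tensor, no tree handling): flatten the batch dims, apply the layer to
# fixed-size slices, and write each slice into a pre-allocated output.
#
#   def _chunked_apply(fn, x, chunk_size):
#       out = torch.zeros_like(x)
#       for i in range(0, x.shape[0], chunk_size):
#           out[i : i + chunk_size] = fn(x[i : i + chunk_size])
#       return out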
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
                UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
                UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
        UpperCamelCase__ : tuple = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
        else:
            # No cached data yet, so force a fresh tuning pass
            UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 6 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''dpr'''
def __init__( self : str , UpperCAmelCase_ : List[Any]=30_522 , UpperCAmelCase_ : Union[str, Any]=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Dict=3_072 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[int]=1e-12 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : str="absolute" , UpperCAmelCase_ : int = 0 , **UpperCAmelCase_ : Any , ):
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
UpperCamelCase__ : Tuple = vocab_size
UpperCamelCase__ : Tuple = hidden_size
UpperCamelCase__ : Optional[Any] = num_hidden_layers
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : Any = hidden_act
UpperCamelCase__ : List[Any] = intermediate_size
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : List[str] = attention_probs_dropout_prob
UpperCamelCase__ : Optional[Any] = max_position_embeddings
UpperCamelCase__ : List[Any] = type_vocab_size
UpperCamelCase__ : str = initializer_range
UpperCamelCase__ : Any = layer_norm_eps
UpperCamelCase__ : Optional[Any] = projection_dim
UpperCamelCase__ : Union[str, Any] = position_embedding_type
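# Usage sketch, assuming this class mirrors transformers' DPRConfig:
#
#   >>> from transformers import DPRConfig
#   >>> config = DPRConfig(projection_dim=0)  # 0 keeps the plain pooler output
#   >>> config.hidden_size
#   768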
| 6 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
        UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
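# End-to-end sketch with a released checkpoint (network access assumed; this
# mirrors what the local fixtures above exercise):
#
#   >>> from transformers import CLIPProcessor
#   >>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   >>> image = Image.open("cat.png")  # any PIL image
#   >>> batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   >>> sorted(batch.keys())
#   ['attention_mask', 'input_ids', 'pixel_values']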
| 6 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : str=1_000 , UpperCAmelCase_ : Any=[3, 3, 6, 4] , UpperCAmelCase_ : Tuple=[48, 56, 112, 220] , ):
UpperCamelCase__ : Optional[Any] = parent
UpperCamelCase__ : Tuple = batch_size
UpperCamelCase__ : Union[str, Any] = num_channels
UpperCamelCase__ : List[str] = is_training
UpperCamelCase__ : Optional[Any] = use_labels
UpperCamelCase__ : Tuple = hidden_dropout_prob
UpperCamelCase__ : Any = attention_probs_dropout_prob
UpperCamelCase__ : Optional[Any] = num_labels
UpperCamelCase__ : Tuple = image_size
UpperCamelCase__ : str = layer_depths
UpperCamelCase__ : str = embed_dims
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : str = None
if self.use_labels:
UpperCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_labels)
UpperCamelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCAmelCase_ , layer_scale_init_value=1e-5 , )
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
UpperCamelCase__ : Dict = SwiftFormerModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7))
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Dict = self.num_labels
UpperCamelCase__ : Union[str, Any] = SwiftFormerForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : str = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
UpperCamelCase__ : Union[str, Any] = SwiftFormerForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCamelCase ( self : List[str]):
((UpperCamelCase__), (UpperCamelCase__), (UpperCamelCase__)) : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_lowerCamelCase = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Union[str, Any] = SwiftFormerModelTester(self)
UpperCamelCase__ : Optional[int] = ConfigTester(
self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __UpperCamelCase ( self : Optional[int]):
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds')
def __UpperCamelCase ( self : int):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = model_class(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__, UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
UpperCamelCase__ : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def __UpperCamelCase ( self : List[Any]):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Dict = SwiftFormerModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skip(reason='SwiftFormer does not output attentions')
def __UpperCamelCase ( self : Optional[Any]):
pass
def __UpperCamelCase ( self : Union[str, Any]):
def check_hidden_states_output(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict):
UpperCamelCase__ : Optional[int] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
UpperCamelCase__ : Any = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : List[Any] = outputs.hidden_states
UpperCamelCase__ : Any = 8
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(UpperCAmelCase_)):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
]) , )
UpperCamelCase__, UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : str = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
def _config_zero_init(UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = copy.deepcopy(UpperCAmelCase_)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , 1e-10)
if isinstance(getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = _config_zero_init(getattr(UpperCAmelCase_ , UpperCAmelCase_))
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return configs_no_init
UpperCamelCase__, UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Dict = _config_zero_init(UpperCAmelCase_)
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(config=UpperCAmelCase_)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : List[str]):
pass
def __UpperCAmelCase ( ) -> Dict:
UpperCamelCase__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Optional[Any]):
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(UpperCAmelCase_)
UpperCamelCase__ : Any = self.default_image_processor
UpperCamelCase__ : Tuple = prepare_img()
UpperCamelCase__ : Dict = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
# verify the logits
UpperCamelCase__ : Dict = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor([[-2.1_703e00, 2.1_107e00, -2.0_811e00]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_)
for k, v in tqdm(state_dict.items()):
        if not isinstance(v , torch.Tensor):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
UpperCamelCase__ : int = v.half()
if save_path is None: # overwrite src_path
UpperCamelCase__ : List[Any] = src_path
torch.save(lowerCamelCase_ , lowerCamelCase_)
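# Quick in-memory check of the conversion rule above (illustrative):
#
#   >>> sd = {"w": torch.ones(2, 2)}
#   >>> sd = {k: v.half() for k, v in sd.items()}
#   >>> all(v.dtype == torch.float16 for v in sd.values())
#   True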
if __name__ == "__main__":
fire.Fire(convert)
| 6 | 1 |
'''simple docstring'''
import sys
import turtle
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
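# Intended behaviour: the midpoint of two vertices, ((x1 + x2) / 2,
# (y1 + y2) / 2); e.g. the midpoint of (0, 0) and (4, 2) is (2.0, 1.0).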
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> None:
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
if depth == 0:
return
triangle(lowerCamelCase_ , get_mid(lowerCamelCase_ , lowerCamelCase_) , get_mid(lowerCamelCase_ , lowerCamelCase_) , depth - 1)
triangle(lowerCamelCase_ , get_mid(lowerCamelCase_ , lowerCamelCase_) , get_mid(lowerCamelCase_ , lowerCamelCase_) , depth - 1)
triangle(lowerCamelCase_ , get_mid(lowerCamelCase_ , lowerCamelCase_) , get_mid(lowerCamelCase_ , lowerCamelCase_) , depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
lowerCAmelCase__ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
lowerCAmelCase__ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 6 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ):
super().__init__(**UpperCAmelCase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Any = num_encoder_blocks
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : int = sr_ratios
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : List[str] = patch_sizes
UpperCamelCase__ : Optional[int] = strides
UpperCamelCase__ : Dict = mlp_ratios
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = classifier_dropout_prob
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : int = layer_norm_eps
UpperCamelCase__ : Dict = decoder_hidden_size
UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_)
UpperCamelCase__ : List[str] = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
| 6 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = 'ZinengTang/tvlt-base'
UpperCamelCase__ : Optional[int] = tempfile.mkdtemp()
def __UpperCamelCase ( self : Tuple , **UpperCAmelCase_ : Any):
return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCAmelCase_)
def __UpperCamelCase ( self : str , **UpperCAmelCase_ : int):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = self.get_feature_extractor()
UpperCamelCase__ : Any = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_)
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Union[str, Any] = TvltProcessor.from_pretrained(self.tmpdirname)
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_)
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : Tuple = self.get_feature_extractor()
UpperCamelCase__ : Union[str, Any] = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_)
UpperCamelCase__ : List[str] = np.ones([12_000])
UpperCamelCase__ : Optional[Any] = feature_extractor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[Any] = processor(audio=UpperCAmelCase_ , return_tensors='np')
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : int = self.get_image_processor()
UpperCamelCase__ : Optional[int] = self.get_feature_extractor()
UpperCamelCase__ : Union[str, Any] = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_)
UpperCamelCase__ : Dict = np.ones([3, 224, 224])
UpperCamelCase__ : str = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Any = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : List[Any] = self.get_image_processor()
UpperCamelCase__ : List[Any] = self.get_feature_extractor()
UpperCamelCase__ : Tuple = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_)
UpperCamelCase__ : int = np.ones([12_000])
UpperCamelCase__ : Optional[Any] = np.ones([3, 224, 224])
UpperCamelCase__ : str = processor(audio=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Tuple = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_feature_extractor()
UpperCamelCase__ : Dict = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_)
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
| 6 |
'''simple docstring'''
def __UpperCAmelCase ( sentence , ngram_size) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
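# NOTE (added, illustrative): with sentence 'abcde' and ngram_size 3 the
# function returns ['abc', 'bcd', 'cde'], i.e. every window of three
# consecutive characters in the string.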
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowercase (__lowerCamelCase ):
def __init__( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]):
UpperCamelCase__ : Tuple = params
UpperCamelCase__ : str = np.array(UpperCAmelCase_)
UpperCamelCase__ : Tuple = np.array([len(UpperCAmelCase_) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : int , UpperCAmelCase_ : List[Any]):
return (self.token_ids[index], self.lengths[index])
def __len__( self : Any):
return len(self.lengths)
def __UpperCamelCase ( self : Union[str, Any]):
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : str = self.params.max_model_input_size
UpperCamelCase__ : List[Any] = self.lengths > max_len
logger.info(F'Splitting {sum(UpperCAmelCase_)} too long sequences.')
def divide_chunks(UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]):
return [l[i : i + n] for i in range(0 , len(UpperCAmelCase_) , UpperCAmelCase_)]
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Tuple = []
if self.params.mlm:
UpperCamelCase__, UpperCamelCase__ : int = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
UpperCamelCase__, UpperCamelCase__ : Optional[Any] = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_)
new_lengths.append(len_)
else:
UpperCamelCase__ : Union[str, Any] = []
for sub_s in divide_chunks(seq_ , max_len - 2):
if sub_s[0] != cls_id:
UpperCamelCase__ : str = np.insert(UpperCAmelCase_ , 0 , UpperCAmelCase_)
if sub_s[-1] != sep_id:
UpperCamelCase__ : Dict = np.insert(UpperCAmelCase_ , len(UpperCAmelCase_) , UpperCAmelCase_)
assert len(UpperCAmelCase_) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(UpperCAmelCase_)
new_tok_ids.extend(UpperCAmelCase_)
new_lengths.extend([len(UpperCAmelCase_) for l in sub_seqs])
UpperCamelCase__ : List[Any] = np.array(UpperCAmelCase_)
UpperCamelCase__ : Dict = np.array(UpperCAmelCase_)
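# NOTE (added, illustrative worked example): with max_model_input_size = 6,
# cls_id = 0 and sep_id = 1, a sequence [0, a, b, c, d, e, f, 1] of length 8
# is cut into chunks of at most max_len - 2 = 4 tokens, and each chunk gets
# the cls/sep ids re-attached at its ends before being stored.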
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Tuple = len(self)
UpperCamelCase__ : Union[str, Any] = self.lengths > 11
UpperCamelCase__ : List[Any] = self.token_ids[indices]
UpperCamelCase__ : str = self.lengths[indices]
UpperCamelCase__ : Optional[int] = len(self)
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.')
def __UpperCamelCase ( self : Optional[Any]):
if "unk_token" not in self.params.special_tok_ids:
return
else:
UpperCamelCase__ : Optional[int] = self.params.special_tok_ids['unk_token']
UpperCamelCase__ : Optional[int] = len(self)
UpperCamelCase__ : Optional[int] = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
UpperCamelCase__ : Optional[int] = (unk_occs / self.lengths) < 0.5
UpperCamelCase__ : Union[str, Any] = self.token_ids[indices]
UpperCamelCase__ : str = self.lengths[indices]
UpperCamelCase__ : Optional[Any] = len(self)
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')
def __UpperCamelCase ( self : Any):
if not self.params.is_master:
return
logger.info(F'{len(self)} sequences')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Dict):
UpperCamelCase__ : Optional[Any] = [t[0] for t in batch]
UpperCamelCase__ : Optional[int] = [t[1] for t in batch]
assert len(UpperCAmelCase_) == len(UpperCAmelCase_)
# Max for paddings
UpperCamelCase__ : List[str] = max(UpperCAmelCase_)
# Pad token ids
if self.params.mlm:
UpperCamelCase__ : Union[str, Any] = self.params.special_tok_ids['pad_token']
else:
UpperCamelCase__ : List[Any] = self.params.special_tok_ids['unk_token']
UpperCamelCase__ : Union[str, Any] = [list(t.astype(UpperCAmelCase_)) + [pad_idx] * (max_seq_len_ - len(UpperCAmelCase_)) for t in token_ids]
assert len(tk_) == len(UpperCAmelCase_)
assert all(len(UpperCAmelCase_) == max_seq_len_ for t in tk_)
UpperCamelCase__ : List[str] = torch.tensor(tk_) # (bs, max_seq_len_)
UpperCamelCase__ : Union[str, Any] = torch.tensor(UpperCAmelCase_) # (bs)
return tk_t, lg_t
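# NOTE (added, illustrative worked example): for a batch holding token arrays
# of lengths [5, 3], max_seq_len_ is 5, the shorter array is right-padded with
# pad_idx up to length 5, and the method returns tk_t of shape (2, 5) together
# with lg_t = tensor([5, 3]).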
| 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector) -> float:
return np.dot(vector , vector)
class __lowercase :
def __init__( self : Tuple , *,
regularization : float = np.inf , kernel : str = "linear" , gamma : float = 0.0 , ):
self.regularization = regularization
self.gamma = gamma
if kernel == "linear":
self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
msg = F'Unknown kernel: {kernel}'
raise ValueError(msg)
def __linear( self : Any , vectora : ndarray , vectorb : ndarray):
return np.dot(vectora , vectorb)
def __rbf( self : Union[str, Any] , vectora : ndarray , vectorb : ndarray):
return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
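# NOTE (added): restated, the linear kernel is k(v, w) = v . w and the rbf
# kernel is k(v, w) = exp(-gamma * ||v - w||**2), the squared norm coming from
# the norm_squared helper at the top of this snippet.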
def fit( self : Any , observations : list[ndarray] , classes : ndarray):
self.observations = observations
self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
(n,) = np.shape(classes)
def to_minimize(candidate : ndarray) -> float:
s = 0
(n,) = np.shape(candidate)
for i in range(n):
for j in range(n):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(candidate)
ly_contraint = LinearConstraint(classes , 0 , 0)
l_bounds = Bounds(0 , self.regularization)
l_star = minimize(
to_minimize , np.ones(n) , bounds=l_bounds , constraints=[ly_contraint]).x
self.optimum = l_star
# calculating mean offset of separation plane to points
s = 0
for i in range(n):
for j in range(n):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
self.offset = s / n
def predict( self : str , observation : ndarray):
s = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , observation)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
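# NOTE (added, illustrative usage sketch; the data and names below are
# assumptions, not part of the original snippet):
#     xs = [np.asarray(v) for v in ([0.0, 1.0], [1.0, 1.0], [2.0, 3.0], [3.0, 3.0])]
#     ys = np.asarray([1, 1, -1, -1])
#     svc = __lowercase(kernel='linear', regularization=10.0)
#     svc.fit(xs, ys)                       # solves Wolfe's dual via scipy minimize
#     svc.predict(np.asarray([0.0, 1.0]))   # -> 1 for this separable toy set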
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( a , b) -> int:
res = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def __UpperCAmelCase ( a , b , c) -> int:
res = 0
while b > 0:
if b & 1:
res = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
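# NOTE (added, illustrative worked example): the loops above are shift-and-add
# ("Russian peasant") multiplication. For a = 7, b = 13 (binary 1101) the
# partial sums are 7 (bit 0), 28 (bit 2) and 56 (bit 3), giving 7 * 13 = 91;
# the modular variant keeps every partial sum reduced mod c.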
| 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Dict = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ : List[str] = 1_024
UpperCamelCase__ : List[str] = 4_096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
UpperCamelCase__ : List[str] = [5, 11, 17, 23]
UpperCamelCase__ : str = [256, 512, 1_024, 1_024]
UpperCamelCase__ : Union[str, Any] = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = 150
UpperCamelCase__ : int = 'huggingface/label-files'
UpperCamelCase__ : List[Any] = 'ade20k-id2label.json'
UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : int = {int(k): v for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
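# e.g. 'refinenet4' becomes 'fusion_stage.layers.0', since abs(4 - 4) == 0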
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
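# NOTE (added, illustrative shapes): timm checkpoints store one fused qkv
# projection of shape (3 * hidden_size, hidden_size) per layer, while the HF
# model expects separate query / key / value matrices, so the fused weight is
# sliced row-wise into thirds: q = qkv[:H], k = qkv[H:2 * H], v = qkv[-H:].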
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
if "ade" in checkpoint_url:
UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
__UpperCAmelCase(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "https://www.worldometers.info/coronavirus") -> dict:
UpperCamelCase__ : List[str] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Dict = soup.findAll('h1')
UpperCamelCase__ : Dict = soup.findAll('div' , {'class': 'maincounter-number'})
keys += soup.findAll('span' , {'class': 'panel-title'})
values += soup.findAll('div' , {'class': 'number-table-main'})
return {key.text.strip(): value.text.strip() for key, value in zip(lowerCamelCase_ , lowerCamelCase_)}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in __UpperCAmelCase().items():
print(f'''{key}\n{value}\n''')
| 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
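# NOTE (added, worked numbers for this tester's defaults): image_size=30 and
# patch_size=2 give num_patches = (30 // 2) ** 2 = 225; with mask_ratio=0.6
# the sequence length is ceil(0.4 * (225 + 1)) = ceil(90.4) = 91.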
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : List[str] = tempfile.mkdtemp()
UpperCamelCase__ : Dict = SamImageProcessor()
UpperCamelCase__ : List[Any] = SamProcessor(UpperCAmelCase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Any):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_).image_processor
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
UpperCamelCase__ : int = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : int = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[str] = self.prepare_image_inputs()
UpperCamelCase__ : Optional[Any] = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : str = processor(images=UpperCAmelCase_ , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_torch
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : str = [torch.ones((1, 3, 5, 5))]
UpperCamelCase__ : List[Any] = [[1_764, 2_646]]
UpperCamelCase__ : Optional[int] = [[683, 1_024]]
UpperCamelCase__ : int = processor.post_process_masks(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
UpperCamelCase__ : Tuple = processor.post_process_masks(
UpperCAmelCase_ , torch.tensor(UpperCAmelCase_) , torch.tensor(UpperCAmelCase_))
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
# should also work with np
UpperCamelCase__ : Union[str, Any] = [np.ones((1, 3, 5, 5))]
UpperCamelCase__ : Optional[int] = processor.post_process_masks(UpperCAmelCase_ , np.array(UpperCAmelCase_) , np.array(UpperCAmelCase_))
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
UpperCamelCase__ : Union[str, Any] = [[1, 0], [0, 1]]
with self.assertRaises(UpperCAmelCase_):
UpperCamelCase__ : List[Any] = processor.post_process_masks(UpperCAmelCase_ , np.array(UpperCAmelCase_) , np.array(UpperCAmelCase_))
@require_vision
@require_tf
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = tempfile.mkdtemp()
UpperCamelCase__ : Tuple = SamImageProcessor()
UpperCamelCase__ : List[Any] = SamProcessor(UpperCAmelCase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_).image_processor
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
UpperCamelCase__ : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Tuple = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : Optional[Any] = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[Any] = processor(images=UpperCAmelCase_ , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_tf
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Tuple = self.get_image_processor()
UpperCamelCase__ : Optional[int] = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [tf.ones((1, 3, 5, 5))]
UpperCamelCase__ : Optional[int] = [[1_764, 2_646]]
UpperCamelCase__ : Optional[int] = [[683, 1_024]]
UpperCamelCase__ : Optional[Any] = processor.post_process_masks(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
UpperCamelCase__ : int = processor.post_process_masks(
UpperCAmelCase_ , tf.convert_to_tensor(UpperCAmelCase_) , tf.convert_to_tensor(UpperCAmelCase_) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
# should also work with np
UpperCamelCase__ : List[Any] = [np.ones((1, 3, 5, 5))]
UpperCamelCase__ : int = processor.post_process_masks(
UpperCAmelCase_ , np.array(UpperCAmelCase_) , np.array(UpperCAmelCase_) , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
UpperCamelCase__ : Optional[Any] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
UpperCamelCase__ : List[Any] = processor.post_process_masks(
UpperCAmelCase_ , np.array(UpperCAmelCase_) , np.array(UpperCAmelCase_) , return_tensors='tf')
@require_vision
@require_torchvision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
UpperCamelCase__ : str = tempfile.mkdtemp()
UpperCamelCase__ : Union[str, Any] = SamImageProcessor()
UpperCamelCase__ : List[Any] = SamProcessor(UpperCAmelCase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self : List[str] , **UpperCAmelCase_ : List[str]):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_).image_processor
def __UpperCamelCase ( self : Dict):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
UpperCamelCase__ : int = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : int = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : str = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.float32)
UpperCamelCase__ : List[Any] = [tf.convert_to_tensor(UpperCAmelCase_)]
UpperCamelCase__ : str = [torch.tensor(UpperCAmelCase_)]
UpperCamelCase__ : List[Any] = [[1_764, 2_646]]
UpperCamelCase__ : List[Any] = [[683, 1_024]]
UpperCamelCase__ : Any = processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='tf')
UpperCamelCase__ : Tuple = processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Any = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='pt')['pixel_values'].numpy()
UpperCamelCase__ : Tuple = processor(images=UpperCAmelCase_ , return_tensors='pt')['pixel_values'].numpy()
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='tf')['pixel_values'].numpy()
UpperCamelCase__ : Dict = processor(images=UpperCAmelCase_ , return_tensors='tf')['pixel_values'].numpy()
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_))
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_))
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_))
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 1 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __lowercase (__lowerCamelCase ):
def __get__( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute')
UpperCamelCase__ : List[str] = '__cached_' + self.fget.__name__
UpperCamelCase__ : Dict = getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
if cached is None:
UpperCamelCase__ : Union[str, Any] = self.fget(UpperCAmelCase_)
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return cached
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : str = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}')
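# NOTE (added, illustrative): 'y', 'TRUE', 'on' and '1' all map to 1; 'n',
# 'False', 'off' and '0' map to 0; any other string raises ValueError.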
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
if is_torch_fx_proxy(lowerCamelCase_):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase_ , torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase_ , tf.Tensor):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase_ , (jnp.ndarray, Tracer)):
return True
return isinstance(lowerCamelCase_ , np.ndarray)
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
return isinstance(lowerCamelCase_ , np.ndarray)
def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]:
return _is_numpy(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> int:
import torch
return isinstance(lowerCamelCase_ , torch.Tensor)
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
return False if not is_torch_available() else _is_torch(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]:
import torch
return isinstance(lowerCamelCase_ , torch.device)
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
return False if not is_torch_available() else _is_torch_device(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
import torch
if isinstance(lowerCamelCase_ , lowerCamelCase_):
if hasattr(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
else:
return False
return isinstance(lowerCamelCase_ , torch.dtype)
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
import tensorflow as tf
return isinstance(lowerCamelCase_ , tf.Tensor)
def __UpperCAmelCase ( lowerCamelCase_) -> int:
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase_ , 'is_symbolic_tensor'):
return tf.is_symbolic_tensor(lowerCamelCase_)
return type(lowerCamelCase_) == tf.Tensor
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> int:
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase_ , jnp.ndarray)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
return False if not is_flax_available() else _is_jax(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
if isinstance(lowerCamelCase_ , (dict, UserDict)):
return {k: to_py_obj(lowerCamelCase_) for k, v in obj.items()}
elif isinstance(lowerCamelCase_ , (list, tuple)):
return [to_py_obj(lowerCamelCase_) for o in obj]
elif is_tf_tensor(lowerCamelCase_):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCamelCase_):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCamelCase_):
return np.asarray(lowerCamelCase_).tolist()
elif isinstance(lowerCamelCase_ , (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __UpperCAmelCase ( lowerCamelCase_) -> int:
if isinstance(lowerCamelCase_ , (dict, UserDict)):
return {k: to_numpy(lowerCamelCase_) for k, v in obj.items()}
elif isinstance(lowerCamelCase_ , (list, tuple)):
return np.array(lowerCamelCase_)
elif is_tf_tensor(lowerCamelCase_):
return obj.numpy()
elif is_torch_tensor(lowerCamelCase_):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCamelCase_):
return np.asarray(lowerCamelCase_)
else:
return obj
class __lowercase (__lowerCamelCase ):
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = fields(self)
# Safety and consistency checks
if not len(UpperCAmelCase_):
raise ValueError(F'{self.__class__.__name__} has no fields.')
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(F'{self.__class__.__name__} should not have more than one required field.')
UpperCamelCase__ : Optional[int] = getattr(self , class_fields[0].name)
UpperCamelCase__ : Dict = all(getattr(self , field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(UpperCAmelCase_):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Tuple = first_field.items()
UpperCamelCase__ : Tuple = True
else:
try:
UpperCamelCase__ : List[str] = iter(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = True
except TypeError:
UpperCamelCase__ : Any = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(UpperCAmelCase_):
if (
not isinstance(UpperCAmelCase_ , (list, tuple))
or not len(UpperCAmelCase_) == 2
or not isinstance(element[0] , UpperCAmelCase_)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCamelCase__ : int = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'Cannot set key/value for {element}. It needs to be a tuple (key, value).')
break
setattr(self , element[0] , element[1])
if element[1] is not None:
UpperCamelCase__ : Optional[int] = element[1]
elif first_field is not None:
UpperCamelCase__ : Optional[int] = first_field
else:
for field in class_fields:
UpperCamelCase__ : List[str] = getattr(self , field.name)
if v is not None:
UpperCamelCase__ : List[str] = v
def __delitem__( self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
def __UpperCamelCase ( self : List[str] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]):
raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
def __UpperCamelCase ( self : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
def __UpperCamelCase ( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str]):
raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.')
def __getitem__( self : Any , UpperCAmelCase_ : str):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = dict(self.items())
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Any):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(UpperCAmelCase_ , UpperCAmelCase_)
super().__setattr__(UpperCAmelCase_ , UpperCAmelCase_)
def __setitem__( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]):
# Will raise a KeyException if needed
super().__setitem__(UpperCAmelCase_ , UpperCAmelCase_)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
return tuple(self[k] for k in self.keys())
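# NOTE (added, illustrative field name): subclasses behave as both dataclasses
# and ordered mappings, so out.logits, out['logits'] and out.to_tuple()[0] all
# refer to the same value, and fields left as None are skipped when iterating
# over keys.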
class __lowercase (__lowerCamelCase , __lowerCamelCase ):
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , UpperCAmelCase_ : Dict):
raise ValueError(
F'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}')
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''longest'''
_lowerCamelCase = '''max_length'''
_lowerCamelCase = '''do_not_pad'''
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''pt'''
_lowerCamelCase = '''tf'''
_lowerCamelCase = '''np'''
_lowerCamelCase = '''jax'''
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : List[ContextManager]):
UpperCamelCase__ : Optional[Any] = context_managers
UpperCamelCase__ : List[str] = ExitStack()
def __enter__( self : int):
for context_manager in self.context_managers:
self.stack.enter_context(UpperCAmelCase_)
def __exit__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int]):
self.stack.__exit__(*UpperCAmelCase_ , **UpperCAmelCase_)
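# NOTE (added, illustrative usage sketch): the wrapper above behaves like
# contextlib.ExitStack, entering every manager in the list on __enter__ and
# unwinding all of them together on __exit__:
#     with __lowercase([open('a.txt'), open('b.txt')]):
#         ...  # both files are open here and both are closed afterwards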
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
UpperCamelCase__ : Dict = infer_framework(lowerCamelCase_)
if framework == "tf":
UpperCamelCase__ : Union[str, Any] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
UpperCamelCase__ : Dict = inspect.signature(model_class.forward) # PyTorch models
else:
UpperCamelCase__ : Optional[int] = inspect.signature(model_class.__call__) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
UpperCamelCase__ : Dict = model_class.__name__
UpperCamelCase__ : Union[str, Any] = infer_framework(lowerCamelCase_)
if framework == "tf":
UpperCamelCase__ : Optional[Any] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
UpperCamelCase__ : Tuple = inspect.signature(model_class.forward) # PyTorch models
else:
UpperCamelCase__ : Tuple = inspect.signature(model_class.__call__) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "" , lowerCamelCase_ = ".") -> Optional[Any]:
def _flatten_dict(lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_="."):
for k, v in d.items():
UpperCamelCase__ : Any = str(lowerCamelCase_) + delimiter + str(lowerCamelCase_) if parent_key else k
if v and isinstance(lowerCamelCase_ , lowerCamelCase_):
yield from flatten_dict(lowerCamelCase_ , lowerCamelCase_ , delimiter=lowerCamelCase_).items()
else:
yield key, v
return dict(_flatten_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_))
@contextmanager
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = False) -> List[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=None) -> str:
if is_numpy_array(lowerCamelCase_):
return np.transpose(lowerCamelCase_ , axes=lowerCamelCase_)
elif is_torch_tensor(lowerCamelCase_):
return array.T if axes is None else array.permute(*lowerCamelCase_)
elif is_tf_tensor(lowerCamelCase_):
import tensorflow as tf
return tf.transpose(lowerCamelCase_ , perm=lowerCamelCase_)
elif is_jax_tensor(lowerCamelCase_):
return jnp.transpose(lowerCamelCase_ , axes=lowerCamelCase_)
else:
raise ValueError(f'Type not supported for transpose: {type(lowerCamelCase_)}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
if is_numpy_array(lowerCamelCase_):
return np.reshape(lowerCamelCase_ , lowerCamelCase_)
elif is_torch_tensor(lowerCamelCase_):
return array.reshape(*lowerCamelCase_)
elif is_tf_tensor(lowerCamelCase_):
import tensorflow as tf
return tf.reshape(lowerCamelCase_ , lowerCamelCase_)
elif is_jax_tensor(lowerCamelCase_):
return jnp.reshape(lowerCamelCase_ , lowerCamelCase_)
else:
raise ValueError(f'Type not supported for reshape: {type(lowerCamelCase_)}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=None) -> str:
if is_numpy_array(lowerCamelCase_):
return np.squeeze(lowerCamelCase_ , axis=lowerCamelCase_)
elif is_torch_tensor(lowerCamelCase_):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCamelCase_)
elif is_tf_tensor(lowerCamelCase_):
import tensorflow as tf
return tf.squeeze(lowerCamelCase_ , axis=lowerCamelCase_)
elif is_jax_tensor(lowerCamelCase_):
return jnp.squeeze(lowerCamelCase_ , axis=lowerCamelCase_)
else:
raise ValueError(f'Type not supported for squeeze: {type(lowerCamelCase_)}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
if is_numpy_array(lowerCamelCase_):
return np.expand_dims(lowerCamelCase_ , lowerCamelCase_)
elif is_torch_tensor(lowerCamelCase_):
return array.unsqueeze(dim=lowerCamelCase_)
elif is_tf_tensor(lowerCamelCase_):
import tensorflow as tf
return tf.expand_dims(lowerCamelCase_ , axis=lowerCamelCase_)
elif is_jax_tensor(lowerCamelCase_):
return jnp.expand_dims(lowerCamelCase_ , axis=lowerCamelCase_)
else:
raise ValueError(f'Type not supported for expand_dims: {type(lowerCamelCase_)}.')
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
if is_numpy_array(lowerCamelCase_):
return np.size(lowerCamelCase_)
elif is_torch_tensor(lowerCamelCase_):
return array.numel()
elif is_tf_tensor(lowerCamelCase_):
import tensorflow as tf
return tf.size(lowerCamelCase_)
elif is_jax_tensor(lowerCamelCase_):
return array.size
else:
        raise ValueError(f'Type not supported for tensor_size: {type(lowerCamelCase_)}.')
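# Numpy-only sanity sketch of the framework-agnostic helpers above (their upstream
# names are transpose / reshape / squeeze / expand_dims / tensor_size; illustrative):
#   x = np.zeros((2, 3, 1))
#   transpose(x).shape        -> (1, 3, 2)
#   reshape(x, (3, 2)).shape  -> (3, 2)
#   squeeze(x, axis=2).shape  -> (2, 3)
#   expand_dims(x, 0).shape   -> (1, 2, 3, 1)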
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for key, value in auto_map.items():
if isinstance(lowerCamelCase_ , (tuple, list)):
UpperCamelCase__ : Tuple = [f'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCamelCase__ : Optional[Any] = f'{repo_id}--{value}'
return auto_map
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
for base_class in inspect.getmro(lowerCamelCase_):
UpperCamelCase__ : Optional[Any] = base_class.__module__
UpperCamelCase__ : Optional[int] = base_class.__name__
if module.startswith('tensorflow') or module.startswith('keras') or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch') or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax') or module.startswith('jax') or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.')
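# Hedged usage sketch of the MRO-based framework inference above (the class names
# below are illustrative stand-ins, not real model classes):
#   class PreTrainedModel: ...
#   class MyBertModel(PreTrainedModel): ...
#   infer_framework(MyBertModel)  -> "pt", since PreTrainedModel appears in the MRO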
| 6 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as a leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
        # Case 2: The node has no edge starting with the word's first character
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
            # Case 3: The node prefix is equal to the matching string
            # Solution: We insert the remaining word into the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
            # Case 4: The word is greater than or equal to the matching string
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
            # If there is a remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
            # We still have part of the word remaining, so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
            # If there is a remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
            # We still have part of the word remaining, so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
                # We delete the node if no edges leave it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
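# Illustration of the three-way split performed by `match` above (a sketch):
#   node = RadixNode(prefix='banana')
#   node.match('bandana')  -> ('ban', 'ana', 'dana')
#   i.e. (common prefix, remaining node prefix, remaining word)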
| 6 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root of p')
while True:
UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_)
if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1:
continue
if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1:
continue
return g
def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...')
UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number.
UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p.
    UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private key d -> has to be greater than 2 for safety.
UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p)
UpperCamelCase__ : Optional[Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def __UpperCAmelCase ( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
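# The script above leans on cryptomath.find_mod_inverse; a minimal sketch of such a
# helper via the extended Euclidean algorithm (the real module may differ):
def _find_mod_inverse_sketch(a: int, m: int) -> int:
    # Returns x with (a * x) % m == 1, assuming gcd(a, m) == 1.
    old_r, r = a % m, m
    old_s, s = 1, 0
    while r != 0:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
    return old_s % m
# e.g. _find_mod_inverse_sketch(3, 7) == 5, since (3 * 5) % 7 == 1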
| 6 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''imagegpt'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Tuple , UpperCAmelCase_ : Dict=512 + 1 , UpperCAmelCase_ : int=32 * 32 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : List[Any]=24 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]="quick_gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=1e-5 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Any=False , **UpperCAmelCase_ : List[str] , ):
UpperCamelCase__ : int = vocab_size
UpperCamelCase__ : int = n_positions
UpperCamelCase__ : List[str] = n_embd
UpperCamelCase__ : Tuple = n_layer
UpperCamelCase__ : Dict = n_head
UpperCamelCase__ : Any = n_inner
UpperCamelCase__ : Union[str, Any] = activation_function
UpperCamelCase__ : Tuple = resid_pdrop
UpperCamelCase__ : str = embd_pdrop
UpperCamelCase__ : List[Any] = attn_pdrop
UpperCamelCase__ : str = layer_norm_epsilon
UpperCamelCase__ : Optional[int] = initializer_range
UpperCamelCase__ : Dict = scale_attn_weights
UpperCamelCase__ : Dict = use_cache
UpperCamelCase__ : Union[str, Any] = scale_attn_by_inverse_layer_idx
UpperCamelCase__ : int = reorder_and_upcast_attn
UpperCamelCase__ : List[Any] = tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCAmelCase_ , **UpperCAmelCase_)
class __lowercase (__lowerCamelCase ):
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
])
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : "FeatureExtractionMixin" , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : int = 32 , ):
UpperCamelCase__ : str = self._generate_dummy_images(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : List[str] = dict(preprocessor(images=UpperCAmelCase_ , return_tensors=UpperCAmelCase_))
return inputs
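# Hedged usage sketch (the upstream class names ImageGPTConfig / ImageGPTOnnxConfig
# are assumed from the 'imagegpt' model_type above):
#   config = ImageGPTConfig()            # 512 + 1 vocab: 512 color clusters + 1 SOS token
#   onnx_config = ImageGPTOnnxConfig(config)
#   list(onnx_config.inputs)             -> ['input_ids']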
| 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : str = 0
# compute the shape of the output matrix
UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[int] = 0
return updated_arr
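# Worked example (illustrative): maxpooling on a 4x4 matrix with size=2, stride=2
# gives a 2x2 output, since (4 - 2) // 2 + 1 = 2:
#   [[ 1,  2,  3,  4],
#    [ 5,  6,  7,  8],      ->  [[ 6.,  8.],
#    [ 9, 10, 11, 12],           [14., 16.]]
#    [13, 14, 15, 16]]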
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : Tuple = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : List[Any] = 0
# compute the shape of the output matrix
UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
lowerCAmelCase__ = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 1 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = 42
_lowerCamelCase = jnp.floataa
_lowerCamelCase = True
def __UpperCamelCase ( self : Optional[Any]):
super().setup()
UpperCamelCase__ : int = nn.Dense(5 , dtype=self.dtype)
def __call__( self : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict):
UpperCamelCase__ : str = super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_)
UpperCamelCase__ : str = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = FlaxBigBirdForNaturalQuestionsModule
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
def cross_entropy(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None):
UpperCamelCase__ : Tuple = logits.shape[-1]
UpperCamelCase__ : Any = (labels[..., None] == jnp.arange(lowerCamelCase_)[None]).astype('f4')
UpperCamelCase__ : Tuple = jax.nn.log_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase__ : List[str] = -jnp.sum(labels * logits , axis=-1)
if reduction is not None:
UpperCamelCase__ : List[str] = reduction(lowerCamelCase_)
return loss
UpperCamelCase__ : int = partial(lowerCamelCase_ , reduction=jnp.mean)
UpperCamelCase__ : int = cross_entropy(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : str = cross_entropy(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Dict = cross_entropy(lowerCamelCase_ , lowerCamelCase_)
return (start_loss + end_loss + pooled_loss) / 3
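# Sanity sketch of the one-hot cross-entropy above (illustrative numbers):
#   logits = jnp.array([[2.0, 0.5, 0.1]]); labels = jnp.array([0])
#   one-hot -> [[1., 0., 0.]]; loss = -log_softmax(logits)[0, 0] ~= 0.317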
@dataclass
class __lowercase :
_lowerCamelCase = "google/bigbird-roberta-base"
_lowerCamelCase = 3000
_lowerCamelCase = 10500
_lowerCamelCase = 128
_lowerCamelCase = 3
_lowerCamelCase = 1
_lowerCamelCase = 5
# tx_args
_lowerCamelCase = 3E-5
_lowerCamelCase = 0.0
_lowerCamelCase = 20000
_lowerCamelCase = 0.0095
_lowerCamelCase = "bigbird-roberta-natural-questions"
_lowerCamelCase = "training-expt"
_lowerCamelCase = "data/nq-training.jsonl"
_lowerCamelCase = "data/nq-validation.jsonl"
def __UpperCamelCase ( self : List[Any]):
os.makedirs(self.base_dir , exist_ok=UpperCAmelCase_)
UpperCamelCase__ : str = os.path.join(self.base_dir , self.save_dir)
UpperCamelCase__ : Optional[int] = self.batch_size_per_device * jax.device_count()
@dataclass
class __lowercase :
_lowerCamelCase = 42
_lowerCamelCase = 4096 # no dynamic padding on TPUs
def __call__( self : Dict , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = self.collate_fn(UpperCAmelCase_)
UpperCamelCase__ : Tuple = jax.tree_util.tree_map(UpperCAmelCase_ , UpperCAmelCase_)
return batch
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__, UpperCamelCase__ : List[Any] = self.fetch_inputs(features['input_ids'])
UpperCamelCase__ : Union[str, Any] = {
'input_ids': jnp.array(UpperCAmelCase_ , dtype=jnp.intaa),
'attention_mask': jnp.array(UpperCAmelCase_ , dtype=jnp.intaa),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa),
}
return batch
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : list):
UpperCamelCase__ : int = [self._fetch_inputs(UpperCAmelCase_) for ids in input_ids]
return zip(*UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list):
UpperCamelCase__ : Dict = [1 for _ in range(len(UpperCAmelCase_))]
while len(UpperCAmelCase_) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if seed is not None:
UpperCamelCase__ : str = dataset.shuffle(seed=lowerCamelCase_)
for i in range(len(lowerCamelCase_) // batch_size):
UpperCamelCase__ : Any = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowerCamelCase_)
@partial(jax.pmap , axis_name='batch')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
def loss_fn(lowerCamelCase_):
UpperCamelCase__ : List[Any] = model_inputs.pop('start_labels')
UpperCamelCase__ : Any = model_inputs.pop('end_labels')
UpperCamelCase__ : Optional[Any] = model_inputs.pop('pooled_labels')
UpperCamelCase__ : int = state.apply_fn(**lowerCamelCase_ , params=lowerCamelCase_ , dropout_rng=lowerCamelCase_ , train=lowerCamelCase_)
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Any = outputs
return state.loss_fn(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__, UpperCamelCase__ : str = jax.random.split(lowerCamelCase_)
UpperCamelCase__ : str = jax.value_and_grad(lowerCamelCase_)
UpperCamelCase__, UpperCamelCase__ : Any = grad_fn(state.params)
UpperCamelCase__ : Tuple = jax.lax.pmean({'loss': loss} , axis_name='batch')
UpperCamelCase__ : Optional[Any] = jax.lax.pmean(lowerCamelCase_ , 'batch')
UpperCamelCase__ : List[Any] = state.apply_gradients(grads=lowerCamelCase_)
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch')
def __UpperCAmelCase ( lowerCamelCase_ , **lowerCamelCase_) -> Dict:
UpperCamelCase__ : Tuple = model_inputs.pop('start_labels')
UpperCamelCase__ : Dict = model_inputs.pop('end_labels')
UpperCamelCase__ : Any = model_inputs.pop('pooled_labels')
UpperCamelCase__ : Optional[Any] = state.apply_fn(**lowerCamelCase_ , params=state.params , train=lowerCamelCase_)
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = outputs
UpperCamelCase__ : str = state.loss_fn(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = jax.lax.pmean({'loss': loss} , axis_name='batch')
return metrics
class __lowercase (train_state.TrainState ):
_lowerCamelCase = struct.field(pytree_node=__lowerCamelCase )
@dataclass
class __lowercase :
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=None):
UpperCamelCase__ : Any = model.params
UpperCamelCase__ : str = TrainState.create(
apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , loss_fn=UpperCAmelCase_ , )
if ckpt_dir is not None:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Optional[int] = restore_checkpoint(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Tuple = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
UpperCamelCase__, UpperCamelCase__ : Optional[Any] = build_tx(**UpperCAmelCase_)
UpperCamelCase__ : List[Any] = train_state.TrainState(
step=UpperCAmelCase_ , apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , opt_state=UpperCAmelCase_ , )
UpperCamelCase__ : Tuple = args
UpperCamelCase__ : Optional[Any] = data_collator
UpperCamelCase__ : str = lr
UpperCamelCase__ : Any = params
UpperCamelCase__ : Any = jax_utils.replicate(UpperCAmelCase_)
return state
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str):
UpperCamelCase__ : int = self.args
UpperCamelCase__ : str = len(UpperCAmelCase_) // args.batch_size
UpperCamelCase__ : Tuple = jax.random.PRNGKey(0)
UpperCamelCase__ : Optional[Any] = jax.random.split(UpperCAmelCase_ , jax.device_count())
for epoch in range(args.max_epochs):
UpperCamelCase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa)
UpperCamelCase__ : Optional[int] = get_batched_dataset(UpperCAmelCase_ , args.batch_size , seed=UpperCAmelCase_)
UpperCamelCase__ : Dict = 0
for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc=F'Running EPOCH-{epoch}'):
UpperCamelCase__ : str = self.data_collator(UpperCAmelCase_)
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Dict = self.train_step_fn(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
if i % args.logging_steps == 0:
UpperCamelCase__ : Optional[Any] = jax_utils.unreplicate(state.step)
UpperCamelCase__ : List[Any] = running_loss.item() / i
UpperCamelCase__ : Optional[Any] = self.scheduler_fn(state_step - 1)
UpperCamelCase__ : List[str] = self.evaluate(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Dict = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(UpperCAmelCase_))
self.logger.log(UpperCAmelCase_ , commit=UpperCAmelCase_)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int):
UpperCamelCase__ : str = get_batched_dataset(UpperCAmelCase_ , self.args.batch_size)
UpperCamelCase__ : Any = len(UpperCAmelCase_) // self.args.batch_size
UpperCamelCase__ : Optional[Any] = jnp.array(0 , dtype=jnp.floataa)
UpperCamelCase__ : Optional[Any] = 0
for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc='Evaluating ... '):
UpperCamelCase__ : Optional[Any] = self.data_collator(UpperCAmelCase_)
UpperCamelCase__ : str = self.val_step_fn(UpperCAmelCase_ , **UpperCAmelCase_)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
return running_loss / i
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Union[str, Any] = jax_utils.unreplicate(UpperCAmelCase_)
print(F'SAVING CHECKPOINT IN {save_dir}' , end=' ... ')
self.model_save_fn(UpperCAmelCase_ , params=state.params)
with open(os.path.join(UpperCAmelCase_ , 'opt_state.msgpack') , 'wb') as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(UpperCAmelCase_ , 'args.joblib'))
joblib.dump(self.data_collator , os.path.join(UpperCAmelCase_ , 'data_collator.joblib'))
with open(os.path.join(UpperCAmelCase_ , 'training_state.json') , 'w') as f:
json.dump({'step': state.step.item()} , UpperCAmelCase_)
print('DONE')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Dict:
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=' ... ')
with open(os.path.join(lowerCamelCase_ , 'flax_model.msgpack') , 'rb') as f:
UpperCamelCase__ : Optional[Any] = from_bytes(state.params , f.read())
with open(os.path.join(lowerCamelCase_ , 'opt_state.msgpack') , 'rb') as f:
UpperCamelCase__ : Any = from_bytes(state.opt_state , f.read())
UpperCamelCase__ : Union[str, Any] = joblib.load(os.path.join(lowerCamelCase_ , 'args.joblib'))
UpperCamelCase__ : int = joblib.load(os.path.join(lowerCamelCase_ , 'data_collator.joblib'))
with open(os.path.join(lowerCamelCase_ , 'training_state.json') , 'r') as f:
UpperCamelCase__ : str = json.load(lowerCamelCase_)
UpperCamelCase__ : Optional[int] = training_state['step']
print('DONE')
return params, opt_state, step, args, data_collator
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : List[str] = num_train_steps - warmup_steps
UpperCamelCase__ : List[Any] = optax.linear_schedule(init_value=lowerCamelCase_ , end_value=lowerCamelCase_ , transition_steps=lowerCamelCase_)
UpperCamelCase__ : Dict = optax.linear_schedule(init_value=lowerCamelCase_ , end_value=1e-7 , transition_steps=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps])
return lr
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
def weight_decay_mask(lowerCamelCase_):
UpperCamelCase__ : int = traverse_util.flatten_dict(lowerCamelCase_)
UpperCamelCase__ : Dict = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(lowerCamelCase_)
UpperCamelCase__ : str = scheduler_fn(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : str = optax.adamw(learning_rate=lowerCamelCase_ , weight_decay=lowerCamelCase_ , mask=lowerCamelCase_)
return tx, lr
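# Sketch of the resulting learning-rate curve (assuming the argument order
# (lr, init_lr, warmup_steps, num_train_steps) used in build_tx above):
#   lr_fn = scheduler_fn(3e-5, 0.0, 100, 1_000)
#   lr_fn(0) -> 0.0; lr_fn(100) -> 3e-5; then linear decay towards 1e-7 at step 1_000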
| 6 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
[Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
            raise ValueError(
                'Only invertible matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
if __name__ == "__main__":
import doctest
doctest.testmod()
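# Quick illustrative check of the class above:
#   m = Matrix([[2, 1], [1, 1]])
#   m.determinant()          -> 1
#   m.inverse().rows         -> [[1, -1], [-1, 2]]
#   (m * m.inverse()).rows   -> [[1, 0], [0, 1]]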
| 6 | 1 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCAmelCase__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCAmelCase__ = concatenate_datasets
lowerCAmelCase__ = DownloadConfig
lowerCAmelCase__ = DownloadManager
lowerCAmelCase__ = DownloadMode
lowerCAmelCase__ = DownloadConfig
lowerCAmelCase__ = DownloadMode
lowerCAmelCase__ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 6 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = inputs['prompt']
UpperCamelCase__ : List[Any] = inputs['generator']
UpperCamelCase__ : Tuple = inputs['num_inference_steps']
UpperCamelCase__ : List[Any] = inputs['output_type']
if "image" in inputs:
UpperCamelCase__ : Tuple = inputs['image']
else:
UpperCamelCase__ : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['mask_image']
else:
UpperCamelCase__ : int = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['original_image']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_)
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase__ : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = inputs['generator']
UpperCamelCase__ : List[Any] = inputs['num_inference_steps']
UpperCamelCase__ : Optional[int] = inputs['output_type']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Tuple = image
if mask_image is not None:
UpperCamelCase__ : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
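# The round-trip assertions above compare outputs via `to_np`; a minimal sketch of
# that helper (the real one lives in ..test_pipelines_common):
#   def to_np(tensor):
#       if isinstance(tensor, torch.Tensor):
#           tensor = tensor.detach().cpu().numpy()
#       return tensor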
| 6 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __lowercase (__lowerCamelCase ):
def __UpperCamelCase ( self : int , UpperCAmelCase_ : str):
with open(UpperCAmelCase_ , encoding='utf-8') as input_file:
UpperCamelCase__ : List[Any] = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
UpperCamelCase__ : Dict = input_file.read()
UpperCamelCase__ : Dict = regexp.search(UpperCAmelCase_)
return match
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
with open(UpperCAmelCase_ , encoding='utf-8') as input_file:
UpperCamelCase__ : Tuple = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL)
UpperCamelCase__ : List[str] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase__ : Union[str, Any] = regexp.finditer(UpperCAmelCase_)
UpperCamelCase__ : Tuple = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = Path('./datasets')
UpperCamelCase__ : List[str] = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCAmelCase_)):
raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}')
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Optional[Any] = Path('./datasets')
UpperCamelCase__ : str = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_print_statements(str(UpperCAmelCase_)):
raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root of p')
while True:
UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_)
if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1:
continue
if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1:
continue
return g
def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...')
UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number.
UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p.
UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety.
UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p)
UpperCamelCase__ : Optional[Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def __UpperCAmelCase ( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
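# Hedged sketch (illustrative, not called by main): an ElGamal round trip using
# the same algebra as the keys above -- public (g, g^x mod p, p), private x.
# The toy numbers below are chosen only so the assert is checkable by hand.
def _demo_elgamal_round_trip() -> None:
    p, g, x = 467, 2, 127           # small prime, base, private exponent
    y = pow(g, x, p)                # public component g^x mod p
    m, k = 123, 213                 # plaintext and ephemeral key
    ca, cb = pow(g, k, p), (m * pow(y, k, p)) % p
    recovered = (cb * pow(ca, p - 1 - x, p)) % p  # Fermat inverse of ca^x
    assert recovered == m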
| 6 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
# Initialise PyTorch model
UpperCamelCase__ : int = FunnelConfig.from_json_file(lowerCamelCase_)
print(f'Building PyTorch model from configuration: {config}')
UpperCamelCase__ : Tuple = FunnelBaseModel(lowerCamelCase_) if base_model else FunnelModel(lowerCamelCase_)
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}')
torch.save(model.state_dict() , lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
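# Hedged usage sketch (script name and paths are hypothetical):
# python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin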
| 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
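# Hedged standalone illustration of the '*' resolution used above: the layer
# index is recovered from the fairseq parameter name and substituted into the
# HF key template.
def _demo_star_substitution() -> str:
    key = 'self_attn.k_proj'
    mapped_key = 'unispeech.encoder.layers.*.attention.k_proj'
    name = 'encoder.layers.3.self_attn.k_proj.weight'
    layer_index = name.split(key)[0].split('.')[-2]  # -> '3'
    return mapped_key.replace('*', layer_index)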
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
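# Hedged usage sketch (script name and paths are hypothetical):
# python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/unispeech.pt \
#     --pytorch_dump_folder_path /path/to/output \
#     --config_path /path/to/config.json \
#     --dict_path /path/to/dict.ltr.txt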
| 6 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
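# Hedged migration sketch, following the warning above (model id is illustrative):
#
#     from diffusers import StableDiffusionImg2ImgPipeline
#     pipe = StableDiffusionImg2ImgPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')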
| 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
        def extract(*args : List[Any] , **kwargs : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
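# Hedged note on the nightly tests above: sld_guidance_scale=0 switches safe
# latent diffusion off entirely, while the strong configuration (scale 2_000,
# warmup 7, threshold 0.025, momentum 0.5, beta 0.7) steers sampling away from
# unsafe concepts; each configuration pins its own expected image slice.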
| 6 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class __lowercase :
_lowerCamelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class __lowercase :
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_lowerCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
_lowerCamelCase = field(
default=__lowerCamelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def __UpperCAmelCase ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , lowerCamelCase_)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ : List[Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_)
datasets.utils.logging.set_verbosity(lowerCamelCase_)
transformers.utils.logging.set_verbosity(lowerCamelCase_)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f', distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}')
logger.info(f'Training/evaluation parameters {training_args}')
# Detecting last checkpoint.
UpperCamelCase__ : List[str] = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ : int = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.')
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
# Set seed before initializing model.
set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
UpperCamelCase__ : Any = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
UpperCamelCase__ : str = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : str = train_dataset.features['label'].names
if training_args.do_eval:
UpperCamelCase__ : Tuple = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : Tuple = eval_dataset.features['label'].names
if training_args.do_predict:
UpperCamelCase__ : Optional[int] = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : int = predict_dataset.features['label'].names
# Labels
UpperCamelCase__ : Tuple = len(lowerCamelCase_)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , idalabel={str(lowerCamelCase_): label for i, label in enumerate(lowerCamelCase_)} , labelaid={label: i for i, label in enumerate(lowerCamelCase_)} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : Dict = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
UpperCamelCase__ : Tuple = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCamelCase__ : List[Any] = False
def preprocess_function(lowerCamelCase_):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=lowerCamelCase_ , max_length=data_args.max_seq_length , truncation=lowerCamelCase_ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ : List[str] = min(len(lowerCamelCase_) , data_args.max_train_samples)
UpperCamelCase__ : List[str] = train_dataset.select(range(lowerCamelCase_))
with training_args.main_process_first(desc='train dataset map pre-processing'):
UpperCamelCase__ : Tuple = train_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowerCamelCase_)) , 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ : Dict = min(len(lowerCamelCase_) , data_args.max_eval_samples)
UpperCamelCase__ : Any = eval_dataset.select(range(lowerCamelCase_))
with training_args.main_process_first(desc='validation dataset map pre-processing'):
UpperCamelCase__ : str = eval_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
UpperCamelCase__ : Dict = min(len(lowerCamelCase_) , data_args.max_predict_samples)
UpperCamelCase__ : Any = predict_dataset.select(range(lowerCamelCase_))
with training_args.main_process_first(desc='prediction dataset map pre-processing'):
UpperCamelCase__ : Any = predict_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
UpperCamelCase__ : str = evaluate.load('xnli')
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase_):
UpperCamelCase__ : Tuple = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_) else p.predictions
UpperCamelCase__ : Any = np.argmax(lowerCamelCase_ , axis=1)
return metric.compute(predictions=lowerCamelCase_ , references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCamelCase__ : str = default_data_collator
elif training_args.fpaa:
UpperCamelCase__ : Optional[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8)
else:
UpperCamelCase__ : int = None
# Initialize our Trainer
UpperCamelCase__ : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
UpperCamelCase__ : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ : Any = last_checkpoint
UpperCamelCase__ : Dict = trainer.train(resume_from_checkpoint=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = train_result.metrics
UpperCamelCase__ : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_)
)
UpperCamelCase__ : str = min(lowerCamelCase_ , len(lowerCamelCase_))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_)
trainer.save_metrics('train' , lowerCamelCase_)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***')
UpperCamelCase__ : Optional[int] = trainer.evaluate(eval_dataset=lowerCamelCase_)
UpperCamelCase__ : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_)
UpperCamelCase__ : str = min(lowerCamelCase_ , len(lowerCamelCase_))
trainer.log_metrics('eval' , lowerCamelCase_)
trainer.save_metrics('eval' , lowerCamelCase_)
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***')
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict')
UpperCamelCase__ : List[str] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCamelCase_)
)
UpperCamelCase__ : str = min(lowerCamelCase_ , len(lowerCamelCase_))
trainer.log_metrics('predict' , lowerCamelCase_)
trainer.save_metrics('predict' , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = np.argmax(lowerCamelCase_ , axis=1)
UpperCamelCase__ : Optional[int] = os.path.join(training_args.output_dir , 'predictions.txt')
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w') as writer:
writer.write('index\tprediction\n')
for index, item in enumerate(lowerCamelCase_):
UpperCamelCase__ : str = label_list[item]
writer.write(f'{index}\t{item}\n')
if __name__ == "__main__":
main()
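# Hedged usage sketch (flags mirror the dataclasses above; the model id is
# illustrative):
# python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --output_dir /tmp/debug_xnli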
| 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
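# Hedged standalone check of the pair extraction above: the adjacent symbol
# pairs of a word tuple, computed independently with zip.
_word = ('h', 'e', 'l', 'l', 'o')
assert {(a, b) for a, b in zip(_word, _word[1:])} == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}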
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
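# Hedged usage sketch (assumes the class's published name, BlenderbotTokenizer):
#
#     tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
#     ids = tokenizer('Hello world')['input_ids']  # single sequences end with EOS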
| 6 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
lowerCAmelCase__ = '▁'
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = BigBirdTokenizer
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
_lowerCamelCase = []
def __init__( self : int , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Tuple="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[MASK]" , UpperCAmelCase_ : Any="[CLS]" , **UpperCAmelCase_ : Tuple , ):
UpperCamelCase__ : str = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
UpperCamelCase__ : Dict = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : List[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : int = vocab_file
UpperCamelCase__ : Tuple = False if not self.vocab_file else True
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Optional[Any] = [self.sep_token_id]
UpperCamelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Dict = [self.sep_token_id]
UpperCamelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : Optional[int] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_):
copyfile(self.vocab_file , UpperCAmelCase_)
return (out_vocab_file,)
| 6 |
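The three helper methods in the tokenizer row above implement the standard BERT-style special-token layout: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair, with token type ids of 0 over the first segment and 1 over the second. A dependency-free sketch of that layout; the ids 101 and 102 are illustrative placeholders, not BigBird's real special-token ids:
CLS, SEP = 101, 102  # placeholder ids for illustration only

def build_pair(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]

def segment_ids(ids_a, ids_b=None):
    first = [0] * (len(ids_a) + 2)          # covers [CLS] A [SEP]
    if ids_b is None:
        return first
    return first + [1] * (len(ids_b) + 1)   # covers B [SEP]

assert build_pair([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert segment_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]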
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
    print(f'''Current {symbol:<4} stock price is {__UpperCAmelCase(symbol):>8}''')
| 6 | 1 |
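A note on the scraper row above: the CSS class it matches ('My(6px) Pos(r) smartphone_Mt(6px)') is tied to one revision of Yahoo Finance's markup and can break at any time. A minimal offline sketch of the same BeautifulSoup find-div-then-span pattern, using a fixed HTML snippet so no network call is needed:
from bs4 import BeautifulSoup

# Offline example: parse a quote out of a known HTML fragment.
html = '<div class="price"><span>189.84</span></div>'
soup = BeautifulSoup(html, 'html.parser')
price = soup.find('div', class_='price').find('span').text
assert price == '189.84'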
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = 42
class __lowercase (nn.Module ):
def __init__( self : Any , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Union[str, Any]=("DownEncoderBlock2D",) , UpperCAmelCase_ : int=(64,) , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : Any="silu" , UpperCAmelCase_ : Any=True , ):
super().__init__()
UpperCamelCase__ : Tuple = layers_per_block
UpperCamelCase__ : str = torch.nn.Convad(
UpperCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase__ : str = None
UpperCamelCase__ : List[str] = nn.ModuleList([])
# down
UpperCamelCase__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(UpperCAmelCase_):
UpperCamelCase__ : str = output_channel
UpperCamelCase__ : Any = block_out_channels[i]
UpperCamelCase__ : Any = i == len(UpperCAmelCase_) - 1
UpperCamelCase__ : int = get_down_block(
UpperCAmelCase_ , num_layers=self.layers_per_block , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
self.down_blocks.append(UpperCAmelCase_)
# mid
UpperCamelCase__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# out
UpperCamelCase__ : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCAmelCase_ , eps=1e-6)
UpperCamelCase__ : Dict = nn.SiLU()
UpperCamelCase__ : str = 2 * out_channels if double_z else out_channels
UpperCamelCase__ : List[Any] = nn.Convad(block_out_channels[-1] , UpperCAmelCase_ , 3 , padding=1)
UpperCamelCase__ : Dict = False
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : List[str]):
UpperCamelCase__ : List[Any] = x
UpperCamelCase__ : Union[str, Any] = self.conv_in(UpperCAmelCase_)
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ : Tuple):
def custom_forward(*UpperCAmelCase_ : Optional[Any]):
return module(*UpperCAmelCase_)
return custom_forward
# down
if is_torch_version('>=' , '1.11.0'):
for down_block in self.down_blocks:
UpperCamelCase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_)
# middle
UpperCamelCase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_)
else:
for down_block in self.down_blocks:
UpperCamelCase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_) , UpperCAmelCase_)
# middle
UpperCamelCase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) , UpperCAmelCase_)
else:
# down
for down_block in self.down_blocks:
UpperCamelCase__ : Optional[Any] = down_block(UpperCAmelCase_)
# middle
UpperCamelCase__ : str = self.mid_block(UpperCAmelCase_)
# post-process
UpperCamelCase__ : Optional[int] = self.conv_norm_out(UpperCAmelCase_)
UpperCamelCase__ : List[str] = self.conv_act(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.conv_out(UpperCAmelCase_)
return sample
class __lowercase (nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=("UpDecoderBlock2D",) , UpperCAmelCase_ : str=(64,) , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : Optional[Any]="silu" , UpperCAmelCase_ : Any="group" , ):
super().__init__()
UpperCamelCase__ : List[str] = layers_per_block
UpperCamelCase__ : int = nn.Convad(
UpperCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Dict = nn.ModuleList([])
UpperCamelCase__ : Any = in_channels if norm_type == 'spatial' else None
# mid
UpperCamelCase__ : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# up
UpperCamelCase__ : Dict = list(reversed(UpperCAmelCase_))
UpperCamelCase__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_):
UpperCamelCase__ : List[str] = output_channel
UpperCamelCase__ : Optional[Any] = reversed_block_out_channels[i]
UpperCamelCase__ : Tuple = i == len(UpperCAmelCase_) - 1
UpperCamelCase__ : List[Any] = get_up_block(
UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , resnet_time_scale_shift=UpperCAmelCase_ , )
self.up_blocks.append(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = output_channel
# out
if norm_type == "spatial":
UpperCamelCase__ : Any = SpatialNorm(block_out_channels[0] , UpperCAmelCase_)
else:
UpperCamelCase__ : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCAmelCase_ , eps=1e-6)
UpperCamelCase__ : str = nn.SiLU()
UpperCamelCase__ : List[Any] = nn.Convad(block_out_channels[0] , UpperCAmelCase_ , 3 , padding=1)
UpperCamelCase__ : Optional[int] = False
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str=None):
UpperCamelCase__ : str = z
UpperCamelCase__ : str = self.conv_in(UpperCAmelCase_)
UpperCamelCase__ : List[str] = next(iter(self.up_blocks.parameters())).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ : Optional[int]):
def custom_forward(*UpperCAmelCase_ : Tuple):
return module(*UpperCAmelCase_)
return custom_forward
if is_torch_version('>=' , '1.11.0'):
# middle
UpperCamelCase__ : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_)
UpperCamelCase__ : str = sample.to(UpperCAmelCase_)
# up
for up_block in self.up_blocks:
UpperCamelCase__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_)
else:
# middle
UpperCamelCase__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = sample.to(UpperCAmelCase_)
# up
for up_block in self.up_blocks:
UpperCamelCase__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_) , UpperCAmelCase_ , UpperCAmelCase_)
else:
# middle
UpperCamelCase__ : Optional[Any] = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Dict = sample.to(UpperCAmelCase_)
# up
for up_block in self.up_blocks:
UpperCamelCase__ : Dict = up_block(UpperCAmelCase_ , UpperCAmelCase_)
# post-process
if latent_embeds is None:
UpperCamelCase__ : str = self.conv_norm_out(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.conv_norm_out(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : List[str] = self.conv_act(UpperCAmelCase_)
UpperCamelCase__ : List[str] = self.conv_out(UpperCAmelCase_)
return sample
class __lowercase (nn.Module ):
def __init__( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[Any]="random" , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Optional[Any]=True):
super().__init__()
UpperCamelCase__ : int = n_e
UpperCamelCase__ : Any = vq_embed_dim
UpperCamelCase__ : List[Any] = beta
UpperCamelCase__ : str = legacy
UpperCamelCase__ : Any = nn.Embedding(self.n_e , self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
UpperCamelCase__ : List[str] = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap)))
UpperCamelCase__ : Union[str, Any] = self.used.shape[0]
UpperCamelCase__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCamelCase__ : List[str] = self.re_embed
UpperCamelCase__ : Optional[int] = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.')
else:
UpperCamelCase__ : List[str] = n_e
UpperCamelCase__ : Dict = sane_index_shape
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = inds.shape
assert len(UpperCAmelCase_) > 1
UpperCamelCase__ : int = inds.reshape(ishape[0] , -1)
UpperCamelCase__ : List[Any] = self.used.to(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (inds[:, :, None] == used[None, None, ...]).long()
UpperCamelCase__ : Union[str, Any] = match.argmax(-1)
UpperCamelCase__ : Tuple = match.sum(2) < 1
if self.unknown_index == "random":
UpperCamelCase__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape).to(device=new.device)
else:
UpperCamelCase__ : Optional[Any] = self.unknown_index
return new.reshape(UpperCAmelCase_)
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Dict):
UpperCamelCase__ : Tuple = inds.shape
assert len(UpperCAmelCase_) > 1
UpperCamelCase__ : Any = inds.reshape(ishape[0] , -1)
UpperCamelCase__ : Optional[Any] = self.used.to(UpperCAmelCase_)
if self.re_embed > self.used.shape[0]: # extra token
UpperCamelCase__ : Union[str, Any] = 0 # simply set to zero
UpperCamelCase__ : Dict = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCAmelCase_)
return back.reshape(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Optional[int]):
# reshape z -> (batch, height, width, channel) and flatten
UpperCamelCase__ : str = z.permute(0 , 2 , 3 , 1).contiguous()
UpperCamelCase__ : Any = z.view(-1 , self.vq_embed_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCamelCase__ : str = torch.argmin(torch.cdist(UpperCAmelCase_ , self.embedding.weight) , dim=1)
UpperCamelCase__ : Optional[int] = self.embedding(UpperCAmelCase_).view(z.shape)
UpperCamelCase__ : int = None
UpperCamelCase__ : Optional[Any] = None
# compute loss for embedding
if not self.legacy:
UpperCamelCase__ : Union[str, Any] = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
UpperCamelCase__ : Optional[int] = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
UpperCamelCase__ : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCamelCase__ : Optional[Any] = z_q.permute(0 , 3 , 1 , 2).contiguous()
if self.remap is not None:
UpperCamelCase__ : Union[str, Any] = min_encoding_indices.reshape(z.shape[0] , -1) # add batch axis
UpperCamelCase__ : List[str] = self.remap_to_used(UpperCAmelCase_)
UpperCamelCase__ : Tuple = min_encoding_indices.reshape(-1 , 1) # flatten
if self.sane_index_shape:
UpperCamelCase__ : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCamelCase__ : Any = indices.reshape(shape[0] , -1) # add batch axis
UpperCamelCase__ : Dict = self.unmap_to_all(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = indices.reshape(-1) # flatten again
# get quantized latent vectors
UpperCamelCase__ : int = self.embedding(UpperCAmelCase_)
if shape is not None:
UpperCamelCase__ : Optional[Any] = z_q.view(UpperCAmelCase_)
# reshape back to match original input shape
UpperCamelCase__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2).contiguous()
return z_q
class __lowercase (__lowerCamelCase ):
def __init__( self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=False):
UpperCamelCase__ : Optional[Any] = parameters
UpperCamelCase__, UpperCamelCase__ : Optional[int] = torch.chunk(UpperCAmelCase_ , 2 , dim=1)
UpperCamelCase__ : str = torch.clamp(self.logvar , -30.0 , 20.0)
UpperCamelCase__ : List[Any] = deterministic
UpperCamelCase__ : Tuple = torch.exp(0.5 * self.logvar)
UpperCamelCase__ : str = torch.exp(self.logvar)
if self.deterministic:
UpperCamelCase__ : int = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[torch.Generator] = None):
# make sure sample is on the same device as the parameters and has same dtype
UpperCamelCase__ : List[Any] = randn_tensor(
self.mean.shape , generator=UpperCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype)
UpperCamelCase__ : List[str] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Dict=None):
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=[1, 2, 3]):
if self.deterministic:
return torch.Tensor([0.0])
UpperCamelCase__ : int = np.log(2.0 * np.pi)
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2) / self.var , dim=UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int]):
return self.mean
| 6 |
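The vector quantizer in the row above reduces to a nearest-neighbour lookup into a learned codebook plus a straight-through gradient estimator. A minimal torch sketch of just those two steps, with a random untrained codebook:
import torch

torch.manual_seed(0)
codebook = torch.randn(8, 4)          # 8 codes, 4-dim embeddings
z = torch.randn(5, 4)                 # 5 flattened latent vectors

indices = torch.cdist(z, codebook).argmin(dim=1)   # nearest code per vector
z_q = codebook[indices]                            # quantized latents
z_q = z + (z_q - z).detach()                       # straight-through estimator
print(indices.tolist(), z_q.shape)                 # e.g. [...], torch.Size([5, 4])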
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''linear'''
_lowerCamelCase = '''cosine'''
_lowerCamelCase = '''cosine_with_restarts'''
_lowerCamelCase = '''polynomial'''
_lowerCamelCase = '''constant'''
_lowerCamelCase = '''constant_with_warmup'''
_lowerCamelCase = '''piecewise_constant'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = -1) -> int:
return LambdaLR(lowerCamelCase_ , lambda lowerCamelCase_: 1 , last_epoch=lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = -1) -> int:
def lr_lambda(lowerCamelCase_):
if current_step < num_warmup_steps:
return float(lowerCamelCase_) / float(max(1.0 , lowerCamelCase_))
return 1.0
return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , last_epoch=lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = -1) -> Union[str, Any]:
UpperCamelCase__ : Union[str, Any] = {}
UpperCamelCase__ : List[str] = step_rules.split(',')
for rule_str in rule_list[:-1]:
UpperCamelCase__, UpperCamelCase__ : str = rule_str.split(':')
UpperCamelCase__ : Optional[int] = int(lowerCamelCase_)
UpperCamelCase__ : Optional[int] = float(lowerCamelCase_)
UpperCamelCase__ : Optional[int] = value
UpperCamelCase__ : str = float(rule_list[-1])
def create_rules_function(lowerCamelCase_ , lowerCamelCase_):
def rule_func(lowerCamelCase_) -> float:
UpperCamelCase__ : Dict = sorted(rules_dict.keys())
for i, sorted_step in enumerate(lowerCamelCase_):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCamelCase__ : str = create_rules_function(lowerCamelCase_ , lowerCamelCase_)
return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , last_epoch=lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=-1) -> Optional[Any]:
def lr_lambda(lowerCamelCase_):
if current_step < num_warmup_steps:
return float(lowerCamelCase_) / float(max(1 , lowerCamelCase_))
return max(
0.0 , float(num_training_steps - current_step) / float(max(1 , num_training_steps - num_warmup_steps)))
return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0.5 , lowerCamelCase_ = -1) -> Any:
def lr_lambda(lowerCamelCase_):
if current_step < num_warmup_steps:
return float(lowerCamelCase_) / float(max(1 , lowerCamelCase_))
UpperCamelCase__ : List[Any] = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps))
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase_) * 2.0 * progress)))
return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = -1) -> List[Any]:
def lr_lambda(lowerCamelCase_):
if current_step < num_warmup_steps:
return float(lowerCamelCase_) / float(max(1 , lowerCamelCase_))
UpperCamelCase__ : str = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase_) * progress) % 1.0))))
return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=1e-7 , lowerCamelCase_=1.0 , lowerCamelCase_=-1) -> Optional[int]:
UpperCamelCase__ : Dict = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})')
def lr_lambda(lowerCamelCase_):
if current_step < num_warmup_steps:
return float(lowerCamelCase_) / float(max(1 , lowerCamelCase_))
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCamelCase__ : Union[str, Any] = lr_init - lr_end
UpperCamelCase__ : int = num_training_steps - num_warmup_steps
UpperCamelCase__ : str = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCamelCase__ : Dict = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
lowerCAmelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = 1 , lowerCamelCase_ = 1.0 , lowerCamelCase_ = -1 , ) -> Optional[Any]:
UpperCamelCase__ : Union[str, Any] = SchedulerType(lowerCamelCase_)
UpperCamelCase__ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase_ , last_epoch=lowerCamelCase_)
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase_ , step_rules=lowerCamelCase_ , last_epoch=lowerCamelCase_)
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase_ , num_warmup_steps=lowerCamelCase_ , last_epoch=lowerCamelCase_)
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase_ , num_warmup_steps=lowerCamelCase_ , num_training_steps=lowerCamelCase_ , num_cycles=lowerCamelCase_ , last_epoch=lowerCamelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase_ , num_warmup_steps=lowerCamelCase_ , num_training_steps=lowerCamelCase_ , power=lowerCamelCase_ , last_epoch=lowerCamelCase_ , )
return schedule_func(
lowerCamelCase_ , num_warmup_steps=lowerCamelCase_ , num_training_steps=lowerCamelCase_ , last_epoch=lowerCamelCase_)
| 6 |
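Every schedule in the row above is a thin LambdaLR wrapper: the lambda maps a step count to a multiplier on the optimizer's base learning rate. A self-contained sketch of the linear warmup-then-decay variant; the warmup/total values are arbitrary:
import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=1.0)

warmup, total = 4, 10
def lr_lambda(step):
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))

sched = LambdaLR(opt, lr_lambda)
lrs = []
for _ in range(total):
    opt.step()
    sched.step()
    lrs.append(opt.param_groups[0]['lr'])
print([round(lr, 2) for lr in lrs])  # ramps to 1.0 by step 4, then decays to 0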
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
# add 48 0-ed integers
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
        self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.sha256(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
    __UpperCAmelCase()
| 6 | 1 |
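A quick way to sanity-check a from-scratch SHA-256 implementation like the row above is to compare it with the standard library on known inputs; a minimal sketch:
import hashlib

# FIPS 180-4 test vector: sha256(b'abc') starts with ba7816bf8f01cfea.
for msg in [b'', b'abc', b'Test String']:
    print(msg, hashlib.sha256(msg).hexdigest())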
'''simple docstring'''
import os
def __UpperCAmelCase ( lowerCamelCase_ = "input.txt") -> int:
with open(os.path.join(os.path.dirname(lowerCamelCase_) , lowerCamelCase_)) as input_file:
UpperCamelCase__ : Tuple = [
[int(lowerCamelCase_) for element in line.split(',')]
for line in input_file.readlines()
]
UpperCamelCase__ : Union[str, Any] = len(lowerCamelCase_)
UpperCamelCase__ : str = len(matrix[0])
UpperCamelCase__ : str = [[-1 for _ in range(lowerCamelCase_)] for _ in range(lowerCamelCase_)]
for i in range(lowerCamelCase_):
UpperCamelCase__ : Dict = matrix[i][0]
for j in range(1 , lowerCamelCase_):
for i in range(lowerCamelCase_):
UpperCamelCase__ : List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCamelCase_):
UpperCamelCase__ : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j])
for i in range(rows - 2 , -1 , -1):
UpperCamelCase__ : List[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j])
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    print(f'''{__UpperCAmelCase() = }''')
| 6 |
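The row above is the Project Euler 82 minimal path sum: paths run from the left column to the right column moving right, up, or down, and each new column is relaxed with a downward pass followed by an upward pass. The same recurrence in a compact sketch, with a 3x3 input small enough to check by hand (the best path is the middle row, 1 + 1 + 1 = 3):
def min_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]          # cost of reaching column 0
    for j in range(1, cols):
        col = [best[i] + matrix[i][j] for i in range(rows)]   # move right
        for i in range(1, rows):               # relax moving down
            col[i] = min(col[i], col[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):      # relax moving up
            col[i] = min(col[i], col[i + 1] + matrix[i][j])
        best = col
    return min(best)

assert min_path_sum([[1, 9, 1], [1, 1, 1], [9, 9, 1]]) == 3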
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
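Numerically, the function in the row above evaluates V_bi = (kT / q) * ln(Nd * Na / ni^2). A quick sketch with typical silicon numbers; the concentrations below are illustrative values, not taken from the code:
from math import log

k = 1.380649e-23      # Boltzmann constant, J/K
q = 1.602176634e-19   # elementary charge, C
T = 300               # K

donor, acceptor, intrinsic = 1e17, 1e17, 1e10   # cm^-3 (units cancel in the ratio)
v_bi = k * T / q * log(donor * acceptor / intrinsic**2)
print(f'{v_bi:.3f} V')   # roughly 0.83 V for these concentrations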
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> list:
UpperCamelCase__ : Tuple = len(lowerCamelCase_)
UpperCamelCase__ : int = [[0] * n for i in range(lowerCamelCase_)]
for i in range(lowerCamelCase_):
UpperCamelCase__ : Dict = y_points[i]
for i in range(2 , lowerCamelCase_):
for j in range(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : Tuple = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
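The interpolation row above appears to be an obfuscated Neville's algorithm (the q-table recurrence matches it), but its loop bounds no longer resolve, so here is a clean standalone sketch written under that assumption:
def neville(x_points, y_points, x0):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]
    for j in range(1, n):
        for i in range(j, n):
            q[i][j] = (
                (x0 - x_points[i - j]) * q[i][j - 1]
                - (x0 - x_points[i]) * q[i - 1][j - 1]
            ) / (x_points[i] - x_points[i - j])
    return q[n - 1][n - 1]

# Interpolating y = 2x at x0 = 5 from four samples recovers 10 exactly.
assert neville([1, 2, 3, 4], [2, 4, 6, 8], 5) == 10.0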
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase_) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
def _prep_inputs(lowerCamelCase_) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase_) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
# Otherwise, we can reuse the precomputed value
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 6 | 1 |
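Stripped of the tensor-tree generality, the chunking machinery in the row above does three things: slice the flattened batch, run the layer on each slice, and write the result into a pre-allocated output. A minimal torch sketch of that loop, checked against the unchunked result:
import torch

def chunked_apply(layer, x, chunk_size):
    out = None
    for start in range(0, x.shape[0], chunk_size):
        piece = layer(x[start : start + chunk_size])
        if out is None:                       # allocate once the output shape is known
            out = piece.new_zeros((x.shape[0],) + piece.shape[1:])
        out[start : start + piece.shape[0]] = piece
    return out

layer = torch.nn.Linear(4, 3)
x = torch.randn(10, 4)
with torch.no_grad():
    assert torch.allclose(chunked_apply(layer, x, 3), layer(x), atol=1e-6)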
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowerCAmelCase__ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''tapas'''
def __init__( self : Any , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[Any]=1_024 , UpperCAmelCase_ : Dict=[3, 256, 256, 2, 256, 256, 10] , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : List[Any]=10.0 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Dict=1.0 , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[int]=1.0 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]="ratio" , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Dict , ):
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCamelCase__ : List[str] = vocab_size
UpperCamelCase__ : str = hidden_size
UpperCamelCase__ : str = num_hidden_layers
UpperCamelCase__ : Tuple = num_attention_heads
UpperCamelCase__ : str = hidden_act
UpperCamelCase__ : Optional[int] = intermediate_size
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ : Union[str, Any] = max_position_embeddings
UpperCamelCase__ : Tuple = type_vocab_sizes
UpperCamelCase__ : Dict = initializer_range
UpperCamelCase__ : Optional[int] = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCamelCase__ : Optional[int] = positive_label_weight
UpperCamelCase__ : str = num_aggregation_labels
UpperCamelCase__ : Union[str, Any] = aggregation_loss_weight
UpperCamelCase__ : List[str] = use_answer_as_supervision
UpperCamelCase__ : List[str] = answer_loss_importance
UpperCamelCase__ : Tuple = use_normalized_answer_loss
UpperCamelCase__ : Optional[int] = huber_loss_delta
UpperCamelCase__ : Any = temperature
UpperCamelCase__ : int = aggregation_temperature
UpperCamelCase__ : str = use_gumbel_for_cells
UpperCamelCase__ : Dict = use_gumbel_for_aggregation
UpperCamelCase__ : List[Any] = average_approximation_function
UpperCamelCase__ : Dict = cell_selection_preference
UpperCamelCase__ : Any = answer_loss_cutoff
UpperCamelCase__ : str = max_num_rows
UpperCamelCase__ : Optional[Any] = max_num_columns
UpperCamelCase__ : Tuple = average_logits_per_cell
UpperCamelCase__ : Optional[int] = select_one_column
UpperCamelCase__ : Union[str, Any] = allow_empty_column_selection
UpperCamelCase__ : Tuple = init_cell_selection_weights_to_zero
UpperCamelCase__ : Dict = reset_position_index_per_cell
UpperCamelCase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
UpperCamelCase__ : Dict = aggregation_labels
UpperCamelCase__ : Optional[int] = no_aggregation_label_index
if isinstance(self.aggregation_labels , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = {int(UpperCAmelCase_): v for k, v in aggregation_labels.items()}
| 6 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 1 |
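The tests in the row above pin down the processor contract: text goes through the tokenizer, images through the image processor, and a combined call yields input_ids, attention_mask, and pixel_values. A hedged usage sketch; it needs the transformers and Pillow packages and downloads the openai/clip-vit-base-patch32 checkpoint on first run:
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
batch = processor(text=['a photo'], images=[image], return_tensors='pt')
print(sorted(batch.keys()))   # ['attention_mask', 'input_ids', 'pixel_values']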
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowercase :
_lowerCamelCase = 42
_lowerCamelCase = 42
class __lowercase :
def __init__( self : Any , UpperCAmelCase_ : int):
UpperCamelCase__ : list[list[Edge]] = [[] for _ in range(UpperCAmelCase_)]
UpperCamelCase__ : Dict = size
def __getitem__( self : Any , UpperCAmelCase_ : int):
return iter(self._graph[vertex])
@property
def __UpperCamelCase ( self : List[Any]):
return self._size
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.')
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).')
self._graph[from_vertex].append(Edge(UpperCAmelCase_ , UpperCAmelCase_))
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[int] = deque([start_vertex])
UpperCamelCase__ : list[int | None] = [None] * self.size
UpperCamelCase__ : Union[str, Any] = 0
while queue:
UpperCamelCase__ : Dict = queue.popleft()
UpperCamelCase__ : Any = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
UpperCamelCase__ : Optional[int] = current_distance + edge.weight
UpperCamelCase__ : Tuple = distances[edge.destination_vertex]
if (
isinstance(UpperCAmelCase_ , UpperCAmelCase_)
and new_distance >= dest_vertex_distance
):
continue
UpperCamelCase__ : Dict = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex)
else:
queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.')
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
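    # A minimal usage sketch of the 0-1 BFS above, assuming the de-obfuscated
    # original names (AdjacencyList.add_edge / get_shortest_path):
    #
    #   g = AdjacencyList(3)
    #   g.add_edge(0, 1, 0)   # weight-0 edges go to the front of the deque
    #   g.add_edge(1, 2, 1)
    #   g.add_edge(0, 2, 1)
    #   g.get_shortest_path(0, 2)  # -> 1 (via the free 0 -> 1 edge)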
| 6 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_)
for k, v in tqdm(state_dict.items()):
if not isinstance(lowerCamelCase_ , torch.Tensor):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
UpperCamelCase__ : int = v.half()
if save_path is None: # overwrite src_path
UpperCamelCase__ : List[Any] = src_path
torch.save(lowerCamelCase_ , lowerCamelCase_)
if __name__ == "__main__":
fire.Fire(convert)
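# Example CLI invocation via fire (script name and paths hypothetical);
# omitting --save_path overwrites the source checkpoint in place:
#
#   python fp16_convert.py path/to/pytorch_model.bin --save_path path/to/model_fp16.bin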
| 6 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ):
super().__init__(**UpperCAmelCase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Any = num_encoder_blocks
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : int = sr_ratios
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : List[str] = patch_sizes
UpperCamelCase__ : Optional[int] = strides
UpperCamelCase__ : Dict = mlp_ratios
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = classifier_dropout_prob
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : int = layer_norm_eps
UpperCamelCase__ : Dict = decoder_hidden_size
UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_)
UpperCamelCase__ : List[str] = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
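# A minimal instantiation sketch of the configs above, assuming the
# de-obfuscated names (SegformerConfig / SegformerOnnxConfig):
#
#   config = SegformerConfig(num_labels=150, semantic_loss_ignore_index=255)
#   config.decoder_hidden_size  # -> 256 (the default set above)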
| 6 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ):
super().__init__(**UpperCAmelCase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Any = num_encoder_blocks
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : int = sr_ratios
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : List[str] = patch_sizes
UpperCamelCase__ : Optional[int] = strides
UpperCamelCase__ : Dict = mlp_ratios
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = classifier_dropout_prob
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : int = layer_norm_eps
UpperCamelCase__ : Dict = decoder_hidden_size
UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_)
UpperCamelCase__ : List[str] = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
| 6 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase__ = ['gpt2']
lowerCAmelCase__ = 'gpt2'
if is_tf_available():
class __lowercase (tf.Module ):
def __init__( self : int , UpperCAmelCase_ : int):
super().__init__()
UpperCamelCase__ : Any = tokenizer
UpperCamelCase__ : Optional[int] = AutoConfig.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = TFGPTaLMHeadModel.from_config(UpperCAmelCase_)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text'),))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int):
UpperCamelCase__ : str = self.tokenizer(UpperCAmelCase_)
UpperCamelCase__ : Dict = tokenized['input_ids'].to_tensor()
UpperCamelCase__ : Tuple = tf.cast(input_ids_dense > 0 , tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCamelCase__ : Union[str, Any] = self.model(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)['logits']
return outputs
@require_tf
@require_keras_nlp
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
super().setUp()
UpperCamelCase__ : Tuple = [GPTaTokenizer.from_pretrained(UpperCAmelCase_) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCamelCase__ : int = [TFGPTaTokenizer.from_pretrained(UpperCAmelCase_) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
UpperCamelCase__ : Dict = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1]))
def __UpperCamelCase ( self : str):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in self.test_sentences:
UpperCamelCase__ : int = tokenizer([test_inputs] , return_tensors='tf')
UpperCamelCase__ : List[str] = tf_tokenizer([test_inputs])
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCamelCase__ : Optional[int] = python_outputs[key].numpy()
UpperCamelCase__ : Optional[int] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
self.assertTrue(tf.reduce_all(tf.cast(UpperCAmelCase_ , tf.intaa) == tf_outputs_values))
@slow
def __UpperCamelCase ( self : List[Any]):
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : Dict = tf.function(UpperCAmelCase_)
for test_inputs in self.test_sentences:
UpperCamelCase__ : int = tf.constant(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = compiled_tokenizer(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = tf_tokenizer(UpperCAmelCase_)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def __UpperCamelCase ( self : Union[str, Any]):
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : Union[str, Any] = ModelToSave(tokenizer=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tf.convert_to_tensor([self.test_sentences[0]])
UpperCamelCase__ : List[str] = model.serving(UpperCAmelCase_) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ : Dict = Path(UpperCAmelCase_) / 'saved.model'
tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={'serving_default': model.serving})
UpperCamelCase__ : Optional[int] = tf.saved_model.load(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = loaded_model.signatures['serving_default'](UpperCAmelCase_)['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def __UpperCamelCase ( self : Dict):
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : Tuple = tf.convert_to_tensor([self.test_sentences[0]])
UpperCamelCase__ : int = tf_tokenizer(UpperCAmelCase_) # Build model with some sample inputs
UpperCamelCase__ : Tuple = tf_tokenizer.get_config()
UpperCamelCase__ : Any = TFGPTaTokenizer.from_config(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = model_from_config(UpperCAmelCase_)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def __UpperCamelCase ( self : List[Any]):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
UpperCamelCase__ : List[Any] = 123_123
for max_length in [3, 5, 1_024]:
UpperCamelCase__ : Tuple = tf.convert_to_tensor([self.test_sentences[0]])
UpperCamelCase__ : List[Any] = tf_tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = out['input_ids'].numpy().shape[1]
assert out_length == max_length
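        # A minimal end-to-end sketch of the in-graph tokenizer under test
        # (TFGPTaTokenizer is this file's mangled spelling of TFGPT2Tokenizer):
        #
        #   tok = TFGPTaTokenizer.from_pretrained("gpt2")
        #   out = tok(tf.constant(["hello world"]), max_length=8)
        #   out["input_ids"]  # int ids usable inside a tf.function / SavedModel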
| 6 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)]
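# Example, assuming the de-obfuscated original `create_ngram`
# (character n-grams of a sentence):
#
#   create_ngram("abcde", 3)  # -> ['abc', 'bcd', 'cde']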
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6 | 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
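    # Worked example, assuming the de-obfuscated original `builtin_voltage`
    # and silicon-like values (all concentrations in cm^-3):
    #
    #   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
    #   # -> ~0.81 V at T = 300 K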
| 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __UpperCAmelCase ( lowerCamelCase_) -> float:
return np.dot(lowerCamelCase_ , lowerCamelCase_)
class __lowercase :
def __init__( self : Tuple , *,
UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ):
UpperCamelCase__ : Union[str, Any] = regularization
UpperCamelCase__ : Optional[int] = gamma
if kernel == "linear":
UpperCamelCase__ : List[str] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
UpperCamelCase__ : Union[str, Any] = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1 / (n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCamelCase__ : Optional[int] = F'Unknown kernel: {kernel}'
raise ValueError(UpperCAmelCase_)
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray):
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray):
UpperCamelCase__ : Any = observations
UpperCamelCase__ : Tuple = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #           constraint: self.regularization >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCamelCase__), ) : Optional[Any] = np.shape(UpperCAmelCase_)
def to_minimize(UpperCAmelCase_ : ndarray) -> float:
UpperCamelCase__ : Union[str, Any] = 0
((UpperCamelCase__), ) : int = np.shape(UpperCAmelCase_)
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(UpperCAmelCase_)
UpperCamelCase__ : List[str] = LinearConstraint(UpperCAmelCase_ , 0 , 0)
UpperCamelCase__ : Dict = Bounds(0 , self.regularization)
UpperCamelCase__ : Any = minimize(
UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x
UpperCamelCase__ : str = l_star
# calculating mean offset of separation plane to points
UpperCamelCase__ : Any = 0
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
UpperCamelCase__ : List[str] = s / n
def __UpperCamelCase ( self : str , UpperCAmelCase_ : ndarray):
UpperCamelCase__ : Optional[int] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , UpperCAmelCase_)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
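    # A smoke-test sketch, assuming the de-obfuscated original class name `SVC`
    # (fit / predict are the two public methods defined above):
    #
    #   xs = [np.asarray(p) for p in ([0.0, 1.0], [0.0, 2.0], [0.0, -1.0], [0.0, -2.0])]
    #   ys = np.asarray([1, 1, -1, -1])
    #   svc = SVC(kernel="linear")
    #   svc.fit(xs, ys)
    #   svc.predict(np.asarray([0.0, 3.0]))   # -> 1
    #   svc.predict(np.asarray([0.0, -3.0]))  # -> -1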
| 6 | 1 |
'''simple docstring'''
import functools
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> int:
# Validation
if not isinstance(lowerCamelCase_ , lowerCamelCase_) or not all(isinstance(lowerCamelCase_ , lowerCamelCase_) for day in days):
raise ValueError('The parameter days should be a list of integers')
if len(lowerCamelCase_) != 3 or not all(isinstance(lowerCamelCase_ , lowerCamelCase_) for cost in costs):
raise ValueError('The parameter costs should be a list of three integers')
if len(lowerCamelCase_) == 0:
return 0
if min(lowerCamelCase_) <= 0:
raise ValueError('All days elements should be greater than 0')
if max(lowerCamelCase_) >= 366:
raise ValueError('All days elements should be less than 366')
UpperCamelCase__ : Optional[Any] = set(lowerCamelCase_)
@functools.cache
def dynamic_programming(lowerCamelCase_) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
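    # Worked example, assuming the de-obfuscated original `mincost_tickets`
    # (costs are for 1-day, 7-day and 30-day passes respectively):
    #
    #   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 25])  # -> 11
    #   # (two 1-day passes around a 7-day pass covering days 4-8, plus day 20)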
| 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Dict = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ : List[str] = 1_024
UpperCamelCase__ : List[str] = 4_096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
UpperCamelCase__ : List[str] = [5, 11, 17, 23]
UpperCamelCase__ : str = [256, 512, 1_024, 1_024]
UpperCamelCase__ : Union[str, Any] = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = 150
UpperCamelCase__ : int = 'huggingface/label-files'
UpperCamelCase__ : List[Any] = 'ade20k-id2label.json'
UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
if "ade" in checkpoint_url:
UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
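# Example CLI invocation (script name hypothetical; the URL is the default above):
#
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large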
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase__ = 10
def __UpperCAmelCase ( lowerCamelCase_) -> list[int]:
UpperCamelCase__ : Optional[int] = 1
UpperCamelCase__ : Any = max(lowerCamelCase_)
while placement <= max_digit:
# declare and initialize empty buckets
UpperCamelCase__ : list[list] = [[] for _ in range(lowerCamelCase_)]
# split list_of_ints between the buckets
for i in list_of_ints:
UpperCamelCase__ : Any = int((i / placement) % RADIX)
buckets[tmp].append(lowerCamelCase_)
# put each buckets' contents into list_of_ints
UpperCamelCase__ : int = 0
for b in range(lowerCamelCase_):
for i in buckets[b]:
UpperCamelCase__ : int = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
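    # Sanity check, assuming the de-obfuscated original `radix_sort`
    # (non-negative integers only, since digits are taken with % RADIX):
    #
    #   radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    #   # -> [2, 24, 45, 66, 75, 90, 170, 802]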
| 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
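        # A minimal inference sketch matching this integration test (the logits
        # are per-patch pixel reconstructions, hence the (1, 196, 768) shape):
        #
        #   model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        #   inputs = self.default_image_processor(images=prepare_img(), return_tensors="pt")
        #   model(**inputs).logits.shape  # -> torch.Size([1, 196, 768])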
| 6 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Dict = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ : List[str] = 1_024
UpperCamelCase__ : List[str] = 4_096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
UpperCamelCase__ : List[str] = [5, 11, 17, 23]
UpperCamelCase__ : str = [256, 512, 1_024, 1_024]
UpperCamelCase__ : Union[str, Any] = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = 150
UpperCamelCase__ : int = 'huggingface/label-files'
UpperCamelCase__ : List[Any] = 'ade20k-id2label.json'
UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
if "ade" in checkpoint_url:
UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ = 10 , lowerCamelCase_ = 1_000 , lowerCamelCase_ = True) -> int:
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_)
and isinstance(lowerCamelCase_ , lowerCamelCase_)
and isinstance(lowerCamelCase_ , lowerCamelCase_)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
return min_val if option else max_val
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> int:
return int((number_a + number_a) / 2)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> None:
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_) and isinstance(lowerCamelCase_ , lowerCamelCase_) and isinstance(lowerCamelCase_ , lowerCamelCase_)
), 'argument values must be type of "int"'
if lower > higher:
        raise ValueError('argument "lower" must be smaller than argument "higher"')
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value')
def answer(lowerCamelCase_) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...')
UpperCamelCase__ : Dict = lower
UpperCamelCase__ : Optional[Any] = higher
UpperCamelCase__ : Optional[int] = []
while True:
UpperCamelCase__ : List[Any] = get_avg(lowerCamelCase_ , lowerCamelCase_)
last_numbers.append(lowerCamelCase_)
if answer(lowerCamelCase_) == "low":
UpperCamelCase__ : Any = number
elif answer(lowerCamelCase_) == "high":
UpperCamelCase__ : Optional[Any] = number
else:
break
print(f'guess the number : {last_numbers[-1]}')
print(f'details : {last_numbers!s}')
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : Dict = int(input('Enter lower value : ').strip())
UpperCamelCase__ : Union[str, Any] = int(input('Enter high value : ').strip())
UpperCamelCase__ : Optional[int] = int(input('Enter value to guess : ').strip())
guess_the_number(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
if __name__ == "__main__":
main()
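# Worked trace of the bisection above: for lower=0, higher=1000, to_guess=355
# the midpoints tried are 500, 250, 375, 312, 343, 359, 351 and finally 355.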
| 6 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
        # Case 2: No outgoing edge starts with the word's first character
        # Solution: We create an edge from the current node to a new one
        # containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
            # Case 3: The matching string consumes the whole node prefix
            # Solution: We insert the remaining word on the child node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
            # Case 4: The word only partially matches the node prefix
            # Solution: Create an intermediate node holding the common prefix,
            # re-attach the old node under it, and add a node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
                # We delete the node if no edges go out of it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
| 6 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
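# End-to-end example: fit an XGBoost regressor on the California housing data
# (25% hold-out split) and report mean absolute and mean squared error.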
def __UpperCAmelCase ( lowerCamelCase_) -> tuple:
return (data["data"], data["target"])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : Optional[Any] = XGBRegressor(verbosity=0 , random_state=42)
xgb.fit(lowerCamelCase_ , lowerCamelCase_)
# Predict target for test data
UpperCamelCase__ : Union[str, Any] = xgb.predict(lowerCamelCase_)
UpperCamelCase__ : int = predictions.reshape(len(lowerCamelCase_) , 1)
return predictions
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[str] = fetch_california_housing()
UpperCamelCase__, UpperCamelCase__ : int = data_handling(lowerCamelCase_)
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = train_test_split(
lowerCamelCase_ , lowerCamelCase_ , test_size=0.25 , random_state=1)
UpperCamelCase__ : Dict = xgboost(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# Error printing
print(f'Mean Absolute Error : {mean_absolute_error(lowerCamelCase_ , lowerCamelCase_)}')
print(f'Mean Square Error : {mean_squared_error(lowerCamelCase_ , lowerCamelCase_)}')
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 6 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
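# The fast tests below build a VQ-Diffusion pipeline from tiny dummy components
# and run on CPU; the slow test compares GPU output against a stored reference image.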
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
lowerCAmelCase__ = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
lowerCAmelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCAmelCase__ = dict(zip(vocab, range(len(vocab))))
lowerCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
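# each merge line appears to pair two BPE tokens with a frequency count (the
# tokenizer only reads the pair); the trailing empty string ends the file cleanly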
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(tmpdirname)
lowerCAmelCase__ = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
lowerCAmelCase__ = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
lowerCAmelCase__ = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
lowerCAmelCase__ = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
lowerCAmelCase__ = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
lowerCAmelCase__ = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
lowerCAmelCase__ = tokenizer(['Making tiny model'], return_tensors='pt')
lowerCAmelCase__ = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
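# Naive max- and average-pooling over square matrices -- the downsampling
# operations used in convolutional neural networks, implemented with plain loops.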
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : str = 0
# compute the shape of the output matrix
UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
            # compute the maximum of the current pooling window
UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[int] = 0
return updated_arr
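# Example (size=2, stride=2): maxpooling of
#   [[ 1,  2,  3,  4],
#    [ 5,  6,  7,  8],         [[ 6,  8],
#    [ 9, 10, 11, 12],   ->     [14, 16]]
#    [13, 14, 15, 16]]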
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : Tuple = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : List[Any] = 0
# compute the shape of the output matrix
UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
            # compute the average of the current pooling window
UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
lowerCAmelCase__ = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 1 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
lowerCAmelCase__ = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
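# The helper below rewrites checkpoint keys with the mapping above; the regex it
# uses singles out the numbered output-hypernetwork MLP layers so their first and
# last linear layers can be renamed to proj_in / proj_out.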
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : Union[str, Any] = {}
state_dict.pop('pixel_mean' , lowerCamelCase_)
state_dict.pop('pixel_std' , lowerCamelCase_)
UpperCamelCase__ : str = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCamelCase__ : int = key.replace(lowerCamelCase_ , lowerCamelCase_)
if re.match(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : int = int(re.match(lowerCamelCase_ , lowerCamelCase_).group(2))
if layer_nb == 0:
UpperCamelCase__ : Optional[Any] = key.replace('layers.0' , 'proj_in')
elif layer_nb == 1:
UpperCamelCase__ : List[str] = key.replace('layers.1' , 'layers.0')
elif layer_nb == 2:
UpperCamelCase__ : List[Any] = key.replace('layers.2' , 'proj_out')
UpperCamelCase__ : Any = value
UpperCamelCase__ : int = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="ybelkada/segment-anything") -> Any:
UpperCamelCase__ : Dict = hf_hub_download(lowerCamelCase_ , f'checkpoints/{model_name}.pth')
if "sam_vit_b" in model_name:
UpperCamelCase__ : Tuple = SamConfig()
elif "sam_vit_l" in model_name:
UpperCamelCase__ : Union[str, Any] = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
UpperCamelCase__ : Tuple = SamConfig(
vision_config=lowerCamelCase_ , )
elif "sam_vit_h" in model_name:
UpperCamelCase__ : Optional[Any] = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
UpperCamelCase__ : str = SamConfig(
vision_config=lowerCamelCase_ , )
UpperCamelCase__ : Optional[int] = torch.load(lowerCamelCase_ , map_location='cpu')
UpperCamelCase__ : Union[str, Any] = replace_keys(lowerCamelCase_)
UpperCamelCase__ : int = SamImageProcessor()
UpperCamelCase__ : Dict = SamProcessor(image_processor=lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = SamModel(lowerCamelCase_)
hf_model.load_state_dict(lowerCamelCase_)
UpperCamelCase__ : Dict = hf_model.to('cuda')
UpperCamelCase__ : Dict = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
UpperCamelCase__ : Any = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw).convert('RGB')
UpperCamelCase__ : Dict = [[[400, 650]]]
UpperCamelCase__ : List[str] = [[1]]
UpperCamelCase__ : Any = processor(images=np.array(lowerCamelCase_) , return_tensors='pt').to('cuda')
with torch.no_grad():
UpperCamelCase__ : Tuple = hf_model(**lowerCamelCase_)
UpperCamelCase__ : str = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
UpperCamelCase__ : Union[str, Any] = processor(
images=np.array(lowerCamelCase_) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors='pt').to('cuda')
with torch.no_grad():
UpperCamelCase__ : List[str] = hf_model(**lowerCamelCase_)
UpperCamelCase__ : List[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
UpperCamelCase__ : Tuple = ((75, 275, 1_725, 850),)
UpperCamelCase__ : str = processor(images=np.array(lowerCamelCase_) , input_boxes=lowerCamelCase_ , return_tensors='pt').to('cuda')
with torch.no_grad():
UpperCamelCase__ : Tuple = hf_model(**lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
UpperCamelCase__ : str = [[[400, 650], [800, 650]]]
UpperCamelCase__ : Optional[int] = [[1, 1]]
UpperCamelCase__ : Any = processor(
images=np.array(lowerCamelCase_) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors='pt').to('cuda')
with torch.no_grad():
UpperCamelCase__ : List[str] = hf_model(**lowerCamelCase_)
UpperCamelCase__ : Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
lowerCAmelCase__ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 6 |
'''simple docstring'''
from __future__ import annotations
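# Self-contained matrix class: determinants via cofactor expansion, minors,
# cofactors, adjugate and inverse, plus operator overloads for +, -, * and **.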
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
[Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
                'Only invertible matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
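# Example usage (illustrative):
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()   # -2
#   (m + m).rows      # [[2, 4], [6, 8]]
#   (m ** 0).rows     # [[1, 0], [0, 1]] (multiplicative identity)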
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
import os
import numpy
import onnx
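# Shrinks an exported ONNX model by detecting duplicated initializer tensors and
# rewiring every consumer (including If/Loop subgraphs) to a single surviving copy.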
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : Optional[Any] = a.name
UpperCamelCase__ : Dict = b.name
UpperCamelCase__ : int = ''
UpperCamelCase__ : Dict = ''
UpperCamelCase__ : Any = a == b
UpperCamelCase__ : List[str] = name_a
UpperCamelCase__ : List[Any] = name_b
return res
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]:
for i, input_name in enumerate(node_proto.input):
if input_name == name:
node_proto.input.insert(lowerCamelCase_ , lowerCamelCase_)
node_proto.input.pop(i + 1)
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase_ , lowerCamelCase_)
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase_ , lowerCamelCase_)
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : int = list(model.graph.initializer)
UpperCamelCase__ : int = list(model_without_ext.graph.initializer)
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCamelCase__ : Tuple = inits[i].name
UpperCamelCase__ : Optional[Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i])
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Union[str, Any] = os.path.dirname(lowerCamelCase_)
UpperCamelCase__ : List[Any] = os.path.basename(lowerCamelCase_)
UpperCamelCase__ : Any = onnx.load(os.path.join(lowerCamelCase_ , lowerCamelCase_))
UpperCamelCase__ : List[str] = list(model.graph.initializer)
UpperCamelCase__ : List[str] = set()
UpperCamelCase__ : int = {}
UpperCamelCase__ : int = []
UpperCamelCase__ : Tuple = 0
for i in range(len(lowerCamelCase_)):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase_)):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j]):
dup_set.add(lowerCamelCase_)
dup_set.add(lowerCamelCase_)
UpperCamelCase__ : Any = inits[j].data_type
UpperCamelCase__ : List[str] = numpy.prod(inits[j].dims)
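                # byte sizes per ONNX TensorProto dtype:
                # 1 = FLOAT and 6 = INT32 take 4 bytes; 7 = INT64 and 11 = DOUBLE take 8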
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase_)
total_reduced_size += mem_size
UpperCamelCase__ : Tuple = inits[i].name
UpperCamelCase__ : int = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase_)
else:
UpperCamelCase__ : List[str] = [name_j]
ind_to_replace.append((j, i))
print('total reduced size: ' , total_reduced_size / 1_024 / 1_024 / 1_024 , 'GB')
UpperCamelCase__ : List[str] = sorted(lowerCamelCase_)
_remove_dup_initializers_from_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Any = 'optimized_' + model_file_name
UpperCamelCase__ : Optional[int] = os.path.join(lowerCamelCase_ , lowerCamelCase_)
onnx.save(lowerCamelCase_ , lowerCamelCase_)
return new_model
| 6 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
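# Mixin-style helpers for DeepFloyd IF pipeline tests: they build tiny dummy
# text-encoder / UNet / scheduler components and verify save/load round-trips,
# including the variant where the prompt is pre-encoded and optional parts are None.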
class __lowercase :
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = inputs['prompt']
UpperCamelCase__ : List[Any] = inputs['generator']
UpperCamelCase__ : Tuple = inputs['num_inference_steps']
UpperCamelCase__ : List[Any] = inputs['output_type']
if "image" in inputs:
UpperCamelCase__ : Tuple = inputs['image']
else:
UpperCamelCase__ : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['mask_image']
else:
UpperCamelCase__ : int = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['original_image']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_)
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase__ : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = inputs['generator']
UpperCamelCase__ : List[Any] = inputs['num_inference_steps']
UpperCamelCase__ : Optional[int] = inputs['output_type']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Tuple = image
if mask_image is not None:
UpperCamelCase__ : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
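# Segment tree with lazy propagation: range-assignment updates and
# range-maximum queries, each in O(log n) after an O(n) build.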
class __lowercase :
def __init__( self : Dict , UpperCAmelCase_ : int):
UpperCamelCase__ : Dict = size
        # allocate 4 * size slots -- the standard safe upper bound for a segment tree
UpperCamelCase__ : Any = [0 for i in range(0 , 4 * size)]
# create array to store lazy update
UpperCamelCase__ : Tuple = [0 for i in range(0 , 4 * size)]
UpperCamelCase__ : List[str] = [0 for i in range(0 , 4 * size)] # flag for lazy update
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
return idx * 2
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int):
return idx * 2 + 1
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : list[int]):
if left_element == right_element:
UpperCamelCase__ : Optional[int] = a[left_element - 1]
else:
UpperCamelCase__ : str = (left_element + right_element) // 2
self.build(self.left(UpperCAmelCase_) , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
self.build(self.right(UpperCAmelCase_) , mid + 1 , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = max(
self.segment_tree[self.left(UpperCAmelCase_)] , self.segment_tree[self.right(UpperCAmelCase_)])
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if self.flag[idx] is True:
UpperCamelCase__ : Union[str, Any] = self.lazy[idx]
UpperCamelCase__ : List[Any] = False
if left_element != right_element:
UpperCamelCase__ : List[Any] = self.lazy[idx]
UpperCamelCase__ : str = self.lazy[idx]
UpperCamelCase__ : int = True
UpperCamelCase__ : int = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
UpperCamelCase__ : str = val
if left_element != right_element:
UpperCamelCase__ : List[Any] = val
UpperCamelCase__ : Tuple = val
UpperCamelCase__ : Union[str, Any] = True
UpperCamelCase__ : int = True
return True
UpperCamelCase__ : Union[str, Any] = (left_element + right_element) // 2
self.update(self.left(UpperCAmelCase_) , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
self.update(self.right(UpperCAmelCase_) , mid + 1 , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = max(
self.segment_tree[self.left(UpperCAmelCase_)] , self.segment_tree[self.right(UpperCAmelCase_)])
return True
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if self.flag[idx] is True:
UpperCamelCase__ : Union[str, Any] = self.lazy[idx]
UpperCamelCase__ : Optional[int] = False
if left_element != right_element:
UpperCamelCase__ : Dict = self.lazy[idx]
UpperCamelCase__ : Optional[Any] = self.lazy[idx]
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Any = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
UpperCamelCase__ : Union[str, Any] = (left_element + right_element) // 2
UpperCamelCase__ : int = self.query(self.left(UpperCAmelCase_) , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : List[Any] = self.query(self.right(UpperCAmelCase_) , mid + 1 , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return max(UpperCAmelCase_ , UpperCAmelCase_)
def __str__( self : List[str]):
return str([self.query(1 , 1 , self.size , UpperCAmelCase_ , UpperCAmelCase_) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
lowerCAmelCase__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowerCAmelCase__ = 15
lowerCAmelCase__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
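# ElGamal key generation: choose a large prime p and a primitive root e1, draw a
# random private exponent d, and derive e2 as the modular inverse of e1**d mod p;
# the public key is (e1, e2, p) and the private key is d.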
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root of p')
while True:
UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_)
if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1:
continue
if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1:
continue
return g
def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...')
UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number.
UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p.
UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety.
UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p)
UpperCamelCase__ : Optional[Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def __UpperCAmelCase ( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
| 6 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
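# Fast (Rust-backed) RoBERTa tokenizer: GPT-2-style byte-level BPE with <s>/</s>
# special tokens and configurable add_prefix_space / trim_offsets post-processing.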
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
_lowerCamelCase = RobertaTokenizer
def __init__( self : Any , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[Any]="replace" , UpperCAmelCase_ : Any="<s>" , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : List[str]="</s>" , UpperCAmelCase_ : Optional[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : str="<pad>" , UpperCAmelCase_ : Tuple="<mask>" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Optional[int] , ):
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase_) != add_prefix_space:
UpperCamelCase__ : Tuple = getattr(UpperCAmelCase_ , pre_tok_state.pop('type'))
UpperCamelCase__ : Optional[Any] = add_prefix_space
UpperCamelCase__ : List[str] = pre_tok_class(**UpperCAmelCase_)
UpperCamelCase__ : str = add_prefix_space
UpperCamelCase__ : Optional[int] = 'post_processor'
UpperCamelCase__ : Dict = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_)
if tokenizer_component_instance:
UpperCamelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase__ : Optional[int] = tuple(state['sep'])
if "cls" in state:
UpperCamelCase__ : Tuple = tuple(state['cls'])
UpperCamelCase__ : Union[str, Any] = False
if state.get('add_prefix_space' , UpperCAmelCase_) != add_prefix_space:
UpperCamelCase__ : int = add_prefix_space
UpperCamelCase__ : List[str] = True
if state.get('trim_offsets' , UpperCAmelCase_) != trim_offsets:
UpperCamelCase__ : Tuple = trim_offsets
UpperCamelCase__ : List[str] = True
if changes_to_apply:
UpperCamelCase__ : Tuple = getattr(UpperCAmelCase_ , state.pop('type'))
UpperCamelCase__ : Any = component_class(**UpperCAmelCase_)
setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Union[str, Any]):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Any):
UpperCamelCase__ : str = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else value
UpperCamelCase__ : Any = value
def __UpperCamelCase ( self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]):
UpperCamelCase__ : str = kwargs.get('is_split_into_words' , UpperCAmelCase_)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = kwargs.get('is_split_into_words' , UpperCAmelCase_)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
UpperCamelCase__ : Tuple = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=None):
UpperCamelCase__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
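# MAPPING translates fairseq parameter names into their HuggingFace UniSpeech counterparts;
# the '*' placeholder is a layer-index wildcard that gets filled in while loading weights.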
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
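    # Copy the converted tensor into the matching attribute of the target HF module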
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 6 | 1 |
'''simple docstring'''
class __lowercase :
def __init__( self : Dict , UpperCAmelCase_ : int):
UpperCamelCase__ : str = n
UpperCamelCase__ : List[str] = [None] * self.n
UpperCamelCase__ : Optional[int] = 0 # index of the first element
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Union[str, Any] = 0
def __len__( self : List[Any]):
return self.size
def __UpperCamelCase ( self : int):
return self.size == 0
def __UpperCamelCase ( self : Tuple):
return False if self.is_empty() else self.array[self.front]
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple):
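        # Write the element into the rear slot, then advance rear modulo n (circular wrap-around)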
if self.size >= self.n:
raise Exception('QUEUE IS FULL')
UpperCamelCase__ : int = data
UpperCamelCase__ : Union[str, Any] = (self.rear + 1) % self.n
self.size += 1
return self
def __UpperCamelCase ( self : List[str]):
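        # Read the front slot, clear it, then advance front modulo n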
if self.size == 0:
raise Exception('UNDERFLOW')
UpperCamelCase__ : str = self.array[self.front]
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : str = (self.front + 1) % self.n
self.size -= 1
return temp
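# Minimal usage sketch (hypothetical values; the class above is a fixed-capacity circular queue):
# q = __lowercase(3)            # capacity 3
# q.enqueue('a').enqueue('b')   # enqueue returns self, so calls can be chained
# q.dequeue()                   # -> 'a'; the front index advances modulo n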
| 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
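        # Stand-in for the safety feature extractor: returns an object whose pixel_values is an empty tensor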
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> tuple[float, float]:
# Check if the input is valid
if not len(lowerCamelCase_) == len(lowerCamelCase_) == 3:
raise ValueError('Please enter a valid equation.')
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.')
# Extract the coefficients
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = equationa
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = equationa
# Calculate the determinants of the matrices
UpperCamelCase__ : Optional[int] = aa * ba - aa * ba
UpperCamelCase__ : Union[str, Any] = ca * ba - ca * ba
UpperCamelCase__ : Union[str, Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)')
else:
raise ValueError('No solution. (Inconsistent system)')
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
UpperCamelCase__ : Tuple = determinant_x / determinant
UpperCamelCase__ : Optional[int] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
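# e.g. bytes_to_unicode()[32] == 'Ġ': the space byte (0x20) is remapped to a printable stand-in character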
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
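# e.g. get_pairs(('h', 'e', 'l', 'l', 'o')) -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}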
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
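        # bpe_ranks maps each merge pair to its priority; lower-ranked pairs are merged first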
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
| 6 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
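# e.g. with 2 processes: rank 0 gets tensor([1., 2.]) and rank 1 gets tensor([3., 4.])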
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : Optional[int] = create_tensor(lowerCamelCase_)
UpperCamelCase__ : List[Any] = gather(lowerCamelCase_)
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1))
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Dict = [state.process_index]
UpperCamelCase__ : Any = gather_object(lowerCamelCase_)
assert len(lowerCamelCase_) == state.num_processes, f'{gathered_obj}, {len(lowerCamelCase_)} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}'
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : Dict = create_tensor(lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = broadcast(lowerCamelCase_)
assert broadcasted_tensor.shape == torch.Size([state.num_processes])
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1))
def __UpperCAmelCase ( lowerCamelCase_) -> str:
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
UpperCamelCase__ : Optional[Any] = torch.arange(state.num_processes + 1).to(state.device)
else:
UpperCamelCase__ : Any = torch.arange(state.num_processes).to(state.device)
UpperCamelCase__ : int = pad_across_processes(lowerCamelCase_)
assert padded_tensor.shape == torch.Size([state.num_processes + 1])
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes)) + [0]
def __UpperCAmelCase ( lowerCamelCase_) -> int:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCamelCase__ : Dict = create_tensor(lowerCamelCase_)
UpperCamelCase__ : int = reduce(lowerCamelCase_ , 'sum')
UpperCamelCase__ : Tuple = torch.tensor([4.0, 6]).to(state.device)
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_), f'{reduced_tensor} != {truth_tensor}'
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCamelCase__ : List[str] = create_tensor(lowerCamelCase_)
UpperCamelCase__ : List[str] = reduce(lowerCamelCase_ , 'mean')
UpperCamelCase__ : List[Any] = torch.tensor([2.0, 3]).to(state.device)
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_), f'{reduced_tensor} != {truth_tensor}'
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
# For xla_spawn (TPUs)
main()
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Any = PartialState()
state.print(f'State: {state}')
state.print('testing gather')
test_gather(lowerCamelCase_)
state.print('testing gather_object')
test_gather_object(lowerCamelCase_)
state.print('testing broadcast')
test_broadcast(lowerCamelCase_)
state.print('testing pad_across_processes')
test_pad_across_processes(lowerCamelCase_)
state.print('testing reduce_sum')
test_reduce_sum(lowerCamelCase_)
state.print('testing reduce_mean')
test_reduce_mean(lowerCamelCase_)
if __name__ == "__main__":
main()
| 6 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 6 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
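        # Returns (common prefix, remaining node prefix, remaining word),
        # e.g. node prefix 'banana' vs word 'bandana' -> ('ban', 'ana', 'dana')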
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
        # Case 1: The word is exactly the node's prefix
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
        # Case 2: The node has no edge starting with the word's first character
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
        # Case 4: The node's prefix only partially matches the word
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
| 6 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
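        # SHA-256 padding: append 0x80 plus zero bytes so the data and the 8-byte length
        # fill complete 64-byte blocks, then append the original bit length big-endian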
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
# add 48 0-ed integers
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
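        # 32-bit right rotation, e.g. ror(1, 1) == 0x80000000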
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
main()
| 6 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''facebook/bart-large-mnli'''
_lowerCamelCase = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
_lowerCamelCase = '''text_classifier'''
_lowerCamelCase = AutoTokenizer
_lowerCamelCase = AutoModelForSequenceClassification
_lowerCamelCase = ['''text''', ['''text''']]
_lowerCamelCase = ['''text''']
def __UpperCamelCase ( self : List[str]):
super().setup()
UpperCamelCase__ : List[str] = self.model.config
UpperCamelCase__ : List[str] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
UpperCamelCase__ : int = int(UpperCAmelCase_)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : List[Any] = labels
return self.pre_processor(
[text] * len(UpperCAmelCase_) , [F'This example is {label}' for label in labels] , return_tensors='pt' , padding='max_length' , )
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : Union[str, Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
| 6 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
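# Worked example (illustrative numbers, not from the source): for
# donor_conc = acceptor_conc = 1e17 and intrinsic_conc = 1e10 (all in cm^-3)
# at T = 300 K, kT/q ~= 0.02585 V and ln(1e17 * 1e17 / (1e10) ** 2) ~= 32.24,
# so the built-in voltage comes out to roughly 0.833 V:
#
#   >>> round(builtin_voltage(1e17, 1e17, 1e10), 3)
#   0.833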
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention', use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                F'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
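# Usage sketch: `rope_scaling`, when given, must be a two-field dict that passes
# the validation above, e.g.
#
#   config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})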
| 6 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
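# Example (sketch): with batch dims (2, 3), flat index 5 is the last element,
# i.e. the multi-index (1, 2):
#
#   >>> _flat_idx_to_idx(5, (2, 3))
#   (1, 2)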
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
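# Usage sketch (hypothetical layer and shapes, not from this source): chunk a
# batched computation over the flattened batch dimensions to bound peak memory.
#
#   layer = lambda **kw: {"out": kw["x"] * 2}
#   x = torch.randn(4, 8, 16)  # two batch dims (4, 8)
#   out = chunk_layer(layer, {"x": x}, chunk_size=8, no_batch_dims=2)
#   assert out["out"].shape == (4, 8, 16)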
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes grow sluggish long before this size in most
        # environments
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info('Tuning chunk size...')

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
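# Usage sketch (hypothetical `representative_fn`/`args`): the tuner binary-searches
# for the largest candidate chunk size whose trial run does not raise a
# RuntimeError (e.g. CUDA OOM), then caches it until the argument shapes change.
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(representative_fn, args, min_chunk_size=16)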
| 6 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
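# Usage sketch (the checkpoint id below is the standard public CLIP checkpoint
# and is an assumption here, not taken from this file):
#
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(
#       text=["a photo of a cat"], images=Image.new("RGB", (224, 224)),
#       return_tensors="pt", padding=True,
#   )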
| 6 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 6 | 1 |
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
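# Example (sketch): 25 = 0b011001 and 32 = 0b100000, so
#
#   >>> binary_or(25, 32)
#   '0b111001'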
| 6 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16, in place or at `save_path`."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
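# CLI sketch (script and file names are illustrative): fire exposes `convert`
# directly, so an invocation along these lines is expected to work:
#
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin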
| 6 | 1 |
'''simple docstring'''
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words):
        raise ValueError('the words should be a list of non-empty strings')

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}

            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
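# Examples (sketch):
#
#   >>> word_break("applepenapple", ["apple", "pen"])
#   True
#   >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
#   False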
| 6 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
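# Usage sketch: the defaults correspond to the smallest (MiT-b0-style) encoder.
#
#   config = SegformerConfig()
#   assert config.hidden_sizes == [32, 64, 160, 256]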
| 6 | 1 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<', '2.0') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                '`split_batches=False`, `dispatch_batches=False`**',
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 6 |
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all length-`ngram_size` character n-grams of `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
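# Example (sketch): character-level trigrams.
#
#   >>> create_ngram("abcde", 3)
#   ['abc', 'bcd', 'cde']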
| 6 | 1 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}')

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---') + 1
        yamlblock = '\n'.join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding='utf-8') as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding='utf-8') as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, 'w', encoding='utf-8') as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-', '_') if key.replace('-', '_') in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_', '-') if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding='utf-8',
        ).decode('utf-8')
lowerCAmelCase__ = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
lowerCAmelCase__ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
lowerCAmelCase__ = ap.parse_args()
lowerCAmelCase__ = Path(args.readme_filepath)
lowerCAmelCase__ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of the vector."""
    return np.dot(vector, vector)
class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
        # in the future, there could be a default value like in sklearn
        # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
        # previously it was 1/(n_features)
        else:
            msg = F'Unknown kernel: {kernel}'
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #   and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
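# Usage sketch (toy, linearly separable data; illustrative only):
#
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([0.0, -1.0]), np.asarray([0.0, -2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear", regularization=10)
#   svc.fit(xs, ys)
#   assert svc.predict(np.asarray([0.0, 1.5])) == 1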
| 6 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
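# Shape intuition for the split above (assuming hidden size H): the fused qkv
# weight has shape (3 * H, H) and the fused bias (3 * H,); rows [0, H) become
# the query projection, rows [H, 2H) the key projection, and rows [2H, 3H)
# the value projection.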
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
if "ade" in checkpoint_url:
UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 1 |
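# A hedged sketch of the key-renaming pattern the conversion script above is
# built around: apply ordered substring rules to every key of a state dict.
# The rule table below is a small illustrative subset, not the full DPT map.
from collections import OrderedDict

RENAME_RULES = [
    ("pretrained.model", "dpt.encoder"),
    ("blocks", "layer"),
    ("mlp.fc1", "intermediate.dense"),
]

def rename_key(name: str) -> str:
    for old, new in RENAME_RULES:
        if old in name:
            name = name.replace(old, new)
    return name

def rename_state_dict(state_dict: dict) -> OrderedDict:
    # Rebuild the mapping so every tensor keeps its value under the new key.
    return OrderedDict((rename_key(k), v) for k, v in state_dict.items())

assert rename_key("pretrained.model.blocks.0.mlp.fc1.weight") == (
    "dpt.encoder.layer.0.intermediate.dense.weight"
)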
'''simple docstring'''
from __future__ import annotations
from typing import Any
def __UpperCAmelCase ( lowerCamelCase_) -> None:
create_state_space_tree(lowerCamelCase_ , [] , 0)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> None:
if index == len(lowerCamelCase_):
print(lowerCamelCase_)
return
create_state_space_tree(lowerCamelCase_ , lowerCamelCase_ , index + 1)
current_subsequence.append(sequence[index])
create_state_space_tree(lowerCamelCase_ , lowerCamelCase_ , index + 1)
current_subsequence.pop()
if __name__ == "__main__":
lowerCAmelCase__ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 6 |
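# A hedged cross-check for the backtracking generator above: the same power
# set produced with itertools, handy for verifying the recursive version.
from itertools import combinations

def all_subsequences(sequence):
    return [list(c) for r in range(len(sequence) + 1) for c in combinations(sequence, r)]

assert all_subsequences([3, 1, 2]) == [
    [], [3], [1], [2], [3, 1], [3, 2], [1, 2], [3, 1, 2]
]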
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
        # (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 1 |
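# The sequence-length arithmetic the ViTMAE tests above depend on, as a small
# self-contained sketch: with mask_ratio of the patches dropped, the encoder
# sees ceil((1 - mask_ratio) * (num_patches + 1)) tokens (the +1 is [CLS]).
import math

def visible_seq_length(image_size: int, patch_size: int, mask_ratio: float) -> int:
    num_patches = (image_size // patch_size) ** 2
    return int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

# Matches the tester defaults above: image_size=30, patch_size=2, mask_ratio=0.6.
assert visible_seq_length(30, 2, 0.6) == 91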
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total "codepoints"
lowerCAmelCase__ = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0xE_0_0_0
lowerCAmelCase__ = 0xE_0_0_1
lowerCAmelCase__ = 0xE_0_0_2
lowerCAmelCase__ = 0xE_0_0_3
lowerCAmelCase__ = 0xE_0_0_4
# Maps special codepoints to human-readable names.
lowerCAmelCase__ = {
    # Special symbols are represented using codepoint values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCAmelCase__ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : Dict , UpperCAmelCase_ : Union[str, Any]=chr(CLS) , UpperCAmelCase_ : int=chr(SEP) , UpperCAmelCase_ : int=chr(SEP) , UpperCAmelCase_ : List[Any]=chr(CLS) , UpperCAmelCase_ : Union[str, Any]=chr(PAD) , UpperCAmelCase_ : List[Any]=chr(MASK) , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=2_048 , **UpperCAmelCase_ : Optional[int] , ):
UpperCamelCase__ : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : Dict = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCamelCase__ : str = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , model_max_length=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Creates a mapping for looking up the IDs of special symbols.
UpperCamelCase__ : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
UpperCamelCase__ : List[Any] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
UpperCamelCase__ : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
UpperCamelCase__ : Optional[int] = UNICODE_VOCAB_SIZE
UpperCamelCase__ : Union[str, Any] = len(self._special_codepoints)
@property
def __UpperCamelCase ( self : Dict):
return self._unicode_vocab_size
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
return list(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
try:
return ord(UpperCAmelCase_)
except TypeError:
raise ValueError(F'invalid token: \'{token}\'')
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCAmelCase_)
except TypeError:
raise ValueError(F'invalid id: {index}')
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[Any]):
return "".join(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : List[str] = [self.sep_token_id]
UpperCamelCase__ : Union[str, Any] = [self.cls_token_id]
UpperCamelCase__ : Dict = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = [1] + ([0] * len(UpperCAmelCase_)) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCAmelCase_)) + [1]
return result
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Tuple = [self.sep_token_id]
UpperCamelCase__ : str = [self.cls_token_id]
UpperCamelCase__ : Tuple = len(cls + token_ids_a + sep) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep) * [1]
return result
def __UpperCamelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
return ()
| 6 |
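# The core encode/decode round trip the character-level tokenizer above
# performs, as a minimal sketch: tokens are single characters and ids are
# their Unicode codepoints.
def encode_chars(text: str) -> list:
    return [ord(ch) for ch in text]

def decode_chars(ids: list) -> str:
    return "".join(chr(i) for i in ids)

assert decode_chars(encode_chars("héllo")) == "héllo"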
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 1 |
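# A minimal sketch of the dummy-object pattern used above (no transformers
# internals assumed): a metaclass that raises a helpful ImportError naming
# the missing backends the moment the placeholder class is instantiated.
class DummyObject(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(
            f"{cls.__name__} requires the following backends: {', '.join(cls._backends)}"
        )

class ScipyDependentModel(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

try:
    ScipyDependentModel()
except ImportError as err:
    print(err)  # ScipyDependentModel requires the following backends: torch, scipy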
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : str = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase_ , architectures=['RobertaPreLayerNormForMaskedLM'])
# convert state_dict
UpperCamelCase__ : Tuple = torch.load(hf_hub_download(repo_id=lowerCamelCase_ , filename='pytorch_model.bin'))
UpperCamelCase__ : Union[str, Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.'):
UpperCamelCase__ : Tuple = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
continue
UpperCamelCase__ : Tuple = tensor_value
UpperCamelCase__ : Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase_ , config=lowerCamelCase_ , state_dict=lowerCamelCase_)
model.save_pretrained(lowerCamelCase_)
# convert tokenizer
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained(lowerCamelCase_)
tokenizer.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 6 |
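# A hedged sketch of the prefix rewrite the converter above performs: keys
# move from the 'roberta.' namespace to 'roberta_prelayernorm.' and unused
# LayerNorm weights are dropped. The example key names are illustrative.
from typing import Optional

def convert_key(key: str) -> Optional[str]:
    if key.endswith((".self.LayerNorm.weight", ".self.LayerNorm.bias")):
        return None  # present in the original dump but never used by the model
    if key.startswith("roberta."):
        return "roberta_prelayernorm." + key[len("roberta.") :]
    return key

assert convert_key("roberta.embeddings.word_embeddings.weight") == (
    "roberta_prelayernorm.embeddings.word_embeddings.weight"
)
assert convert_key("roberta.encoder.layer.0.attention.self.LayerNorm.bias") is None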
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
        # Mapping from the first character of each child's prefix to the child node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
        # Case 2: The node has no edge starting with the word's first character
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
            # Case 3: The node prefix equals the matching string
            # Solution: We insert the remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
            # Case 4: The word and the node prefix match only partially
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
            # If there is remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
            # If there is remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
                # If it is not a leaf, there is nothing to delete
if not incoming_node.is_leaf:
return False
else:
                    # We delete the node if no edges leave it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
| 6 | 1 |
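# The three-way split at the heart of the radix node's `match` above, as a
# standalone sketch: longest common prefix, leftover node prefix, leftover word.
def split_prefix(prefix: str, word: str) -> tuple:
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

assert split_prefix("banana", "bandana") == ("ban", "ana", "dana")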
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __lowercase :
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[Any]=99 , UpperCAmelCase_ : str=64 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : List[Any]=None , ):
UpperCamelCase__ : List[str] = parent
UpperCamelCase__ : Optional[Any] = batch_size
UpperCamelCase__ : Tuple = seq_length
UpperCamelCase__ : Tuple = is_training
UpperCamelCase__ : Optional[Any] = use_input_mask
UpperCamelCase__ : Optional[int] = use_token_type_ids
UpperCamelCase__ : List[Any] = use_labels
UpperCamelCase__ : Any = vocab_size
UpperCamelCase__ : List[str] = hidden_size
UpperCamelCase__ : Dict = embedding_size
UpperCamelCase__ : Union[str, Any] = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : List[str] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Dict = attention_probs_dropout_prob
UpperCamelCase__ : str = max_position_embeddings
UpperCamelCase__ : List[str] = type_vocab_size
UpperCamelCase__ : Union[str, Any] = type_sequence_label_size
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Any = num_labels
UpperCamelCase__ : List[str] = num_choices
UpperCamelCase__ : List[str] = scope
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase__ : List[str] = None
if self.use_input_mask:
UpperCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase__ : Any = None
if self.use_token_type_ids:
UpperCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : List[Any] = None
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCamelCase__ : int = ids_tensor([self.batch_size] , self.num_choices)
UpperCamelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any):
UpperCamelCase__ : Dict = MegatronBertModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
UpperCamelCase__ : Any = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def __UpperCamelCase ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : List[str] = MegatronBertForMaskedLM(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]):
UpperCamelCase__ : str = MegatronBertForCausalLM(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Optional[Any] = MegatronBertForNextSentencePrediction(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : int = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Dict = MegatronBertForPreTraining(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , next_sentence_label=UpperCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : str = MegatronBertForQuestionAnswering(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]):
UpperCamelCase__ : Optional[Any] = self.num_labels
UpperCamelCase__ : Optional[int] = MegatronBertForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : List[str] = self.num_labels
UpperCamelCase__ : Any = MegatronBertForTokenClassification(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]):
UpperCamelCase__ : Any = self.num_choices
UpperCamelCase__ : str = MegatronBertForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : List[str] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Dict = self.prepare_config_and_inputs()
        UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Dict = config_and_inputs
UpperCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase = True
# test_resize_embeddings = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int=False):
UpperCamelCase__ : int = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class in get_values(UpperCAmelCase_):
UpperCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : List[str] = MegatronBertModelTester(self)
UpperCamelCase__ : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : List[Any]):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : str):
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCAmelCase_)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCAmelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
return torch.tensor(
lowerCamelCase_ , dtype=torch.long , device=lowerCamelCase_ , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase (unittest.TestCase ):
@slow
@unittest.skip('Model is not available.')
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : List[Any] = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
UpperCamelCase__ : List[Any] = os.path.join(os.environ['MYDIR'] , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = MegatronBertModel.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.half()
UpperCamelCase__ : Optional[int] = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]])
with torch.no_grad():
UpperCamelCase__ : List[str] = model(UpperCAmelCase_)[0]
UpperCamelCase__ : Union[str, Any] = torch.Size((1, 9, 1_024))
self.assertEqual(output.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3):
for jj in range(3):
UpperCamelCase__ : Tuple = output[0, ii, jj]
UpperCamelCase__ : Optional[Any] = expected[3 * ii + jj]
UpperCamelCase__ : Optional[Any] = 'ii={} jj={} a={} b={}'.format(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
self.assertTrue(math.isclose(UpperCAmelCase_ , UpperCAmelCase_ , rel_tol=UpperCAmelCase_ , abs_tol=UpperCAmelCase_) , msg=UpperCAmelCase_)
| 6 |
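# The element-wise closeness loop from the integration test above, factored
# into a reusable sketch: compare a 3x3 corner of a model output against a
# flat list of expected values with both relative and absolute tolerance.
import math

def assert_corner_close(output_corner, expected, tol=1e-4):
    for ii in range(3):
        for jj in range(3):
            a, b = output_corner[ii][jj], expected[3 * ii + jj]
            assert math.isclose(a, b, rel_tol=tol, abs_tol=tol), f"ii={ii} jj={jj} a={a} b={b}"

assert_corner_close([[1.0, 2.0, 3.0]] * 3, [1.0, 2.0, 3.0] * 3)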
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type='np' , return_dict=False , num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=generator , output_type='np' , )
        image = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text , 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
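A quick sanity check of the two pooling routines (the toy input and the printed outputs below are an illustration assuming the functions above; they are not from the original file):

# illustrative demo: with 2x2 windows and stride 2, each output cell pools one quadrant
demo = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
print(maxpooling(demo, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(demo, size=2, stride=2))  # [[ 3.  5.] [11. 13.]] (averages truncated by int())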
| 6 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn' , '.self_attn')
        k = k.replace('norm1' , 'self_attn_layer_norm')
        k = k.replace('norm2' , 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1' , 'self_attn_layer_norm')
        k = k.replace('norm2' , 'encoder_attn_layer_norm')
        k = k.replace('norm3' , 'final_layer_norm')
    return k
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding' , 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path , map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping , strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
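To make the key-mapping logic concrete, here is a small sanity check of rename_state_dict_key (my own example, not part of the original script): the PATTERNS pass rewrites `attention` and `q_lin`, and the encoder branch then turns `.attn` into `.self_attn`.

# illustrative check of the ParlAI -> Hugging Face key rewrite
assert (
    rename_state_dict_key('encoder.layers.0.attention.q_lin.weight')
    == 'encoder.layers.0.self_attn.q_proj.weight'
)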
| 6 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))
    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row , column)
        return -1 * self.get_minor(row , column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row , column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)
    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ])
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row , list):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column , list):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other , Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other , (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other , Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row , column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other , int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
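A short usage sketch for the Matrix class (my own example, not from the original file; a determinant of 1 is chosen deliberately, since scalar multiplication truncates entries with int()):

# illustrative demo of the Matrix class above
m = Matrix([[2, 1], [1, 1]])            # determinant 1, so the inverse stays integral
print(m.determinant())                  # 1
print(m.inverse().rows)                 # [[1, -1], [-1, 2]]
print(m * m.inverse() == m.identity())  # True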
| 6 | 1 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample , eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500]):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t , eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400) - 0.1_47_71)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960) - 0.3_24_60)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486) - 0.0_09_79)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1 , per_sample_batch)
        residual = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 11_47.79_04) < 1e-2
        assert abs(result_mean.item() - 0.49_82) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_72.00_67) < 1e-2
        assert abs(result_mean.item() - 0.22_39_67) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.53_02) < 1e-2
        assert abs(result_mean.item() - 0.06_84) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_49.82_95) < 1e-2
        assert abs(result_mean.item() - 0.19_51) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_49.07_84) < 1e-2
        assert abs(result_mean.item() - 0.19_41) < 1e-3
| 6 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        if "image" in inputs:
            image = inputs['image']
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component) is None , F'`{optional_component}` did not stay set to None after loading.' , )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff , 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff , 1e-4)
| 6 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url , headers=headers).json()
    job_links = {}
    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}' , headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        return job_links
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
    result = requests.get(url , headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}' , headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        return artifacts
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url , allow_redirects=True)
    file_path = os.path.join(output_dir , f'{artifact_name}.zip')
    with open(file_path , 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': ') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                test = line[len('FAILED ') :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f'`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` '
            f'and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
            ' problem.')
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir , p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
    return r
def get_model(test):
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        test = test.split('/')[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = f'| {count} | {error[:100]} | |'
        lines.append(line)
    return "\n".join(lines)

def make_github_table_per_model(reduced_by_model):
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = f'| {model} | {count} | {error[:60]} | {_count} |'
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
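An offline sanity check of the aggregation step (the hand-made rows below are my own illustration; real rows come from get_all_errors and have the form [error line, error, failed test, job link]):

# illustrative demo of the reducers on fake log rows
fake_logs = [
    ['line1', 'AssertionError', 'tests/models/bert/test_modeling_bert.py::test_a', None],
    ['line2', 'AssertionError', 'tests/models/bert/test_modeling_bert.py::test_b', None],
    ['line3', 'OSError', 'tests/models/gpt2/test_modeling_gpt2.py::test_c', None],
]
print(reduce_by_error(fake_logs))   # AssertionError counted twice, OSError once
print(reduce_by_model(fake_logs))   # grouped under 'bert' and 'gpt2'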
| 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3 , p_val)
        if pow(g , 2 , p_val) == 1:
            continue
        if pow(g , p_val , p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3 , p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p) , p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt' , 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt' , 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')
def main() -> None:
    print('Making key files...')
    make_key_files('elgamal' , 2_048)
    print('Key files generation successful')
print('Key files generation successful')
if __name__ == "__main__":
main()
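The key relation generate_key relies on, shown with toy numbers (my own illustration; real keys use a large prime from rabin_miller and the cryptomath module's find_mod_inverse):

# illustrative walk-through of the ElGamal key relation with tiny numbers
p, g, d = 23, 5, 6              # small prime, a primitive root mod 23, private exponent
e_2 = pow(pow(g, d, p), -1, p)  # modular inverse of g**d mod p (pow(x, -1, m) needs Python 3.8+)
assert (pow(g, d, p) * e_2) % p == 1
print((g, e_2, p))              # the public part of the key; d stays private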
| 6 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias'))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('cls_token', 'deit.embeddings.cls_token'),
            ('dist_token', 'deit.embeddings.distillation_token'),
            ('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
            ('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
            ('pos_embed', 'deit.embeddings.position_embeddings'),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('norm.weight', 'layernorm.weight'),
                ('norm.bias', 'layernorm.bias'),
                ('pre_logits.fc.weight', 'pooler.dense.weight'),
                ('pre_logits.fc.bias', 'pooler.dense.bias'),
            ])
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit') else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ('norm.weight', 'deit.layernorm.weight'),
                ('norm.bias', 'deit.layernorm.bias'),
                ('head.weight', 'cls_classifier.weight'),
                ('head.bias', 'cls_classifier.bias'),
                ('head_dist.weight', 'distillation_classifier.weight'),
                ('head_dist.bias', 'distillation_classifier.bias'),
            ])
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset') , 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('tiny'):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small'):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base'):
        pass
    elif deit_name[4:].startswith('large'):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_q_k_v(state_dict , config , base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size)
    encoding = image_processor(images=prepare_img() , return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {deit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
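Invocation sketch (the script file name and the output directory below are placeholders of my own, not from the original):

# python convert_deit_timm_to_pytorch.py \
#     --deit_name vit_deit_base_distilled_patch16_224 \
#     --pytorch_dump_folder_path ./deit-base-distilled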
| 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*' , layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
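# Note on the branch structure above (a reading of the fairseq layout, not stated in
# the source): type_id 0 addresses the convolution's own weight/bias, type_id 2 its
# layer norm (with group norm this exists only for layer 0), and anything else under
# "conv_layers" falls through to unused_weights.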
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
            # important: change the bos & pad token ids, since the CTC blank symbol is
            # <pad> and not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
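# Illustrative invocation (script name and all paths are placeholders, not taken
# from the source):
#
#   python convert_unispeech.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --config_path ./config.json \
#       --dict_path ./dict.ltr.txt
#
# For a pretraining-only checkpoint, pass --not_finetuned and omit --dict_path.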
| 6 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
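    # Typical round trip, mirroring the checks above (a sketch, not part of the test
    # suite): processor(text=..., images=...) routes text through the tokenizer and
    # images through the image processor, and the resulting batch exposes input_ids,
    # attention_mask and pixel_values.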
| 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        # assemble the pipeline (DDIM scheduler here, so there are no PRK steps to skip)
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
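    # Reading of the sld_* kwargs exercised above (inferred from the calls, not
    # documented in this file): sld_guidance_scale=0 disables safe latent diffusion
    # entirely, while the strong setting combines scale 2_000, warmup 7, threshold
    # 0.0_25, momentum scale 0.5 and momentum beta 0.7.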
| 6 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowercase (__lowerCamelCase , __lowerCamelCase ):
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=64 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : List[str]=[2, 4, 8, 16] , UpperCAmelCase_ : Optional[Any]=7 , UpperCAmelCase_ : List[str]=3.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1e-5 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Any , ):
super().__init__(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = patch_size
UpperCamelCase__ : Tuple = num_channels
UpperCamelCase__ : List[Any] = embed_dim
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : Dict = len(UpperCAmelCase_)
UpperCamelCase__ : str = num_heads
UpperCamelCase__ : List[Any] = kernel_size
UpperCamelCase__ : Union[str, Any] = mlp_ratio
UpperCamelCase__ : List[str] = qkv_bias
UpperCamelCase__ : List[Any] = hidden_dropout_prob
UpperCamelCase__ : int = attention_probs_dropout_prob
UpperCamelCase__ : str = drop_path_rate
UpperCamelCase__ : Any = hidden_act
UpperCamelCase__ : Optional[int] = layer_norm_eps
UpperCamelCase__ : Union[str, Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ : Tuple = int(embed_dim * 2 ** (len(UpperCAmelCase_) - 1))
UpperCamelCase__ : List[str] = layer_scale_init_value
UpperCamelCase__ : List[str] = ['stem'] + [F'stage{idx}' for idx in range(1 , len(UpperCAmelCase_) + 1)]
UpperCamelCase__, UpperCamelCase__ : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names)
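# Minimal instantiation sketch (upstream the class is NatConfig; here it is bound to
# an obfuscated name, so adapt the identifier):
#
#   config = NatConfig()   # all defaults from the signature above
#   config.hidden_size     # 64 * 2**(4 - 1) = 512 for the default 4-stage layout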
| 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
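# The returned dict maps every byte value 0-255 to a printable unicode character:
# bytes that are already printable map to themselves, and the rest are shifted past
# 255 (for example, the space byte 32 becomes 'Ġ', U+0120).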
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
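# For example, for the symbol tuple ('h', 'e', 'l', 'l', 'o') this returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}.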
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
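    # Illustrative trace with a hypothetical merge table [('l', 'o'), ('lo', 'w</w>')]:
    # the symbols ('l', 'o', 'w</w>') merge first to ('lo', 'w</w>') and then to
    # ('low</w>',), after which the space-joined result is cached and returned.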
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # Prefix with a space, matching what Blenderbot does internally
inputs.append(' ' + text)
else:
                # Generated responses already contain the space prefix.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
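# Usage sketch (upstream this class is BlenderbotTokenizer; the vocab/merges URLs
# above point at facebook/blenderbot-3B):
#
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   tokenizer(" Hello world")["input_ids"]   # ends with eos_token_id; no bos prepended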
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
                '[' + '. '.join([str(value) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
                [Matrix.dot_product(row , column) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
if __name__ == "__main__":
import doctest
doctest.testmod()
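# Worked example (hand-checkable):
#
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()   # 1*4 - 2*3 = -2
#   m.adjugate()      # Matrix([[4, -2], [-3, 1]])
#   m.inverse()       # adjugate() * (1 / -2)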
| 6 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
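# Example call (the output depends on live Yahoo Finance markup; the CSS class
# string above is a snapshot and tends to change):
#
#   stock_price("AAPL")   # e.g. '172.39'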
| 6 | 1 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : str = 0
# compute the shape of the output matrix
UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[int] = 0
return updated_arr
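# Worked example: a 4x4 input with size=2 and stride=2 yields a 2x2 output, e.g.
# maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
# -> [[ 6.,  8.], [14., 16.]]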
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : Tuple = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : List[Any] = 0
# compute the shape of the output matrix
UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[Any] = 0
return updated_arr
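# Worked example for the same 4x4 input with size=2 and stride=2: each window
# average is truncated by int(), giving [[ 3.,  5.], [11., 13.]]
# (e.g. (1 + 2 + 5 + 6) / 4 = 3.5 -> 3).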
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
lowerCAmelCase__ = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
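    # Slow integration test: under the transformers test conventions it only runs
    # when the RUN_SLOW environment variable is set, e.g.
    #   RUN_SLOW=1 pytest tests/models/dit/   (path is illustrative)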
| 6 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowercase :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any=13 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : Union[str, Any]=6 , UpperCAmelCase_ : List[Any]=17 , UpperCAmelCase_ : Optional[int]=23 , UpperCAmelCase_ : Optional[Any]=11 , UpperCAmelCase_ : Union[str, Any]=True , ):
UpperCamelCase__ : str = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : int = seq_length
UpperCamelCase__ : Optional[Any] = act_dim
UpperCamelCase__ : str = state_dim
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = max_length
UpperCamelCase__ : str = is_training
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Dict = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
UpperCamelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
UpperCamelCase__ : Tuple = floats_tensor((self.batch_size, self.seq_length, 1))
UpperCamelCase__ : Any = floats_tensor((self.batch_size, self.seq_length, 1))
UpperCamelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000)
UpperCamelCase__ : str = random_attention_mask((self.batch_size, self.seq_length))
UpperCamelCase__ : int = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __UpperCamelCase ( self : List[Any]):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[str] = DecisionTransformerModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[Any] = model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
self.parent.assertEqual(result.state_preds.shape , states.shape)
self.parent.assertEqual(result.action_preds.shape , actions.shape)
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape)
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size)) # seq length *3 as there are 3 modalities: states, returns and actions
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
),
) : Dict = config_and_inputs
UpperCamelCase__ : List[str] = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (DecisionTransformerModel,) if is_torch_available() else ()
_lowerCamelCase = ()
_lowerCamelCase = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
_lowerCamelCase = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = DecisionTransformerModelTester(self)
UpperCamelCase__ : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
@slow
def __UpperCamelCase ( self : Any):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Any = DecisionTransformerModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__, UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Any = model_class(UpperCAmelCase_)
UpperCamelCase__ : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : int = [*signature.parameters.keys()]
UpperCamelCase__ : Union[str, Any] = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(UpperCAmelCase_)] , UpperCAmelCase_)
@require_torch
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Dict = 2 # number of steps of autoregressive prediction we will perform
UpperCamelCase__ : Dict = 10 # defined by the RL environment, may be normalized
UpperCamelCase__ : Union[str, Any] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
UpperCamelCase__ : Optional[Any] = model.to(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = model.config
torch.manual_seed(0)
UpperCamelCase__ : int = torch.randn(1 , 1 , config.state_dim).to(device=UpperCAmelCase_ , dtype=torch.floataa) # env.reset()
UpperCamelCase__ : int = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = torch.tensor(UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=torch.floataa).reshape(1 , 1 , 1)
UpperCamelCase__ : Union[str, Any] = state
UpperCamelCase__ : Tuple = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase_ , dtype=torch.floataa)
UpperCamelCase__ : int = torch.zeros(1 , 0 , device=UpperCAmelCase_ , dtype=torch.floataa)
UpperCamelCase__ : List[str] = torch.tensor(0 , device=UpperCAmelCase_ , dtype=torch.long).reshape(1 , 1)
for step in range(UpperCAmelCase_):
UpperCamelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase_)] , dim=1)
UpperCamelCase__ : str = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase_)] , dim=1)
UpperCamelCase__ : Optional[int] = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)
with torch.no_grad():
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Dict = model(
states=UpperCAmelCase_ , actions=UpperCAmelCase_ , rewards=UpperCAmelCase_ , returns_to_go=UpperCAmelCase_ , timesteps=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )
self.assertEqual(action_pred.shape , actions.shape)
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4))
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Dict = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim).to(device=UpperCAmelCase_ , dtype=torch.floataa),
1.0,
False,
{},
)
UpperCamelCase__ : str = action_pred[0, -1]
UpperCamelCase__ : List[Any] = torch.cat([states, state] , dim=1)
UpperCamelCase__ : Union[str, Any] = returns_to_go[0, -1] - reward
UpperCamelCase__ : Dict = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
UpperCamelCase__ : Any = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCAmelCase_ , dtype=torch.long) * (step + 1)] , dim=1)
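# A minimal sketch of the autoregressive control loop the slow test above
# exercises, written with plain names for readability. The environment step is
# a stand-in (random next state, constant reward); in practice you would call a
# real env.step(action). Only the public DecisionTransformerModel call
# signature already used in the test is assumed; the function name is
# illustrative.
def run_decision_transformer_episode(model, config, device, target_return=10.0, num_steps=2):
    import torch

    states = torch.randn(1, 1, config.state_dim, device=device)
    actions = torch.zeros(1, 0, config.act_dim, device=device)
    rewards = torch.zeros(1, 0, device=device)
    returns_to_go = torch.full((1, 1, 1), target_return, device=device)
    timesteps = torch.zeros(1, 1, dtype=torch.long, device=device)
    action = None
    for step in range(num_steps):
        # append placeholders for the action/reward about to be predicted
        actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=device)], dim=1)
        rewards = torch.cat([rewards, torch.zeros(1, 1, device=device)], dim=1)
        attention_mask = torch.ones(1, states.shape[1], dtype=torch.long, device=device)
        with torch.no_grad():
            _, action_preds, _ = model(
                states=states,
                actions=actions,
                rewards=rewards,
                returns_to_go=returns_to_go,
                timesteps=timesteps,
                attention_mask=attention_mask,
                return_dict=False,
            )
        action = action_preds[0, -1]
        next_state = torch.randn(1, 1, config.state_dim, device=device)  # stand-in for env.step
        reward = 1.0
        states = torch.cat([states, next_state], dim=1)
        pred_return = returns_to_go[0, -1] - reward
        returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
        timesteps = torch.cat(
            [timesteps, torch.full((1, 1), step + 1, dtype=torch.long, device=device)], dim=1)
    return action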
| 6 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
# add 48 0-ed integers
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
main()
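# A small, self-contained illustration of the preprocessing rule implemented
# above: after padding, the message is a whole number of 64-byte blocks, with
# the final 8 bytes holding the original length in bits. This mirrors the
# static preprocessing method; the helper name is illustrative, a sketch for
# clarity only.
def _demo_padding(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    big_endian_length = struct.pack(">Q", len(data) * 8)
    return data + padding + big_endian_length


for _sample in (b"", b"abc", b"a" * 55, b"a" * 56):
    assert len(_demo_padding(_sample)) % 64 == 0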
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : Tuple = set({'(', '[', '{'})
UpperCamelCase__ : Dict = set({')', ']', '}'})
UpperCamelCase__ : Optional[Any] = {'{': '}', '[': ']', '(': ')'}
for i in range(len(lowerCamelCase_)):
if s[i] in open_brackets:
stack.append(s[i])
elif s[i] in closed_brackets and (
len(lowerCamelCase_) == 0 or (len(lowerCamelCase_) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCamelCase_) == 0
def __UpperCAmelCase ( ) -> Optional[int]:
UpperCamelCase__ : List[str] = input('Enter sequence of brackets: ')
if is_balanced(lowerCamelCase_):
print(lowerCamelCase_ , 'is balanced')
else:
print(lowerCamelCase_ , 'is not balanced')
if __name__ == "__main__":
main()
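# Quick sanity checks for the bracket matcher above. The call site in main()
# suggests the checker is bound to the name `is_balanced`; that assumption is
# carried over here.
assert is_balanced("([]{})")
assert not is_balanced("([)]")  # interleaved brackets are rejected
assert not is_balanced("(((")   # unclosed brackets leave a non-empty stack
assert is_balanced("")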
| 6 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
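# Worked example of the formula above, spelled out with plain names as a
# sketch: V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2). The doping values are
# illustrative, roughly typical for a silicon pn junction at room temperature.
def _demo_builtin_potential() -> float:
    q = physical_constants["electron volt"][0]  # elementary charge in J/eV
    N_d, N_a, n_i = 1e17, 1e16, 1.5e10  # cm^-3, illustrative silicon values
    return Boltzmann * 300 * log(N_d * N_a / n_i**2) / q  # T = 300 K


print(f"example built-in potential: {_demo_builtin_potential():.3f} V")  # ~0.753 V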
| 6 | 1 |
'''simple docstring'''
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
return 1 / (1 + np.exp(-z))
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> List[str]:
return (-y * np.log(lowerCamelCase_) - (1 - y) * np.log(1 - h)).mean()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : int = np.dot(lowerCamelCase_ , lowerCamelCase_)
return np.sum(y * scores - np.log(1 + np.exp(lowerCamelCase_)))
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=70_000) -> Optional[int]:
UpperCamelCase__ : Optional[int] = np.zeros(x.shape[1])
for iterations in range(lowerCamelCase_):
UpperCamelCase__ : List[Any] = np.dot(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : int = sigmoid_function(lowerCamelCase_)
UpperCamelCase__ : List[Any] = np.dot(x.T , h - y) / y.size
UpperCamelCase__ : str = theta - alpha * gradient # updating the weights
UpperCamelCase__ : str = np.dot(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Dict = sigmoid_function(lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = cost_function(lowerCamelCase_ , lowerCamelCase_)
if iterations % 100 == 0:
            print(f'loss: {j}')  # print the loss every 100 iterations
return theta
if __name__ == "__main__":
lowerCAmelCase__ = datasets.load_iris()
lowerCAmelCase__ = iris.data[:, :2]
lowerCAmelCase__ = (iris.target != 0) * 1
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('theta: ', theta) # printing the theta i.e our weights vector
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
return sigmoid_function(
np.dot(lowerCamelCase_ , lowerCamelCase_)) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((lowerCAmelCase__) , (lowerCAmelCase__)) = (x[:, 0].min(), x[:, 0].max())
((lowerCAmelCase__) , (lowerCAmelCase__)) = (x[:, 1].min(), x[:, 1].max())
((lowerCAmelCase__) , (lowerCAmelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
lowerCAmelCase__ = np.c_[xxa.ravel(), xxa.ravel()]
lowerCAmelCase__ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
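# A quick training-accuracy check, as a sketch: threshold the fitted
# probabilities at 0.5 and compare with the labels. `predict_prob` is the
# helper defined above (its name is taken from the plotting code).
predictions = (predict_prob(x) >= 0.5).astype(int)
print('training accuracy:', (predictions == y).mean())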
| 6 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase_) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
def _prep_inputs(lowerCamelCase_) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase_) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
            # No cached args yet, so there is nothing to reuse and we must tune
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
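# A minimal, self-contained sketch of the chunking idea implemented above:
# flatten the batch dimensions, run the layer over fixed-size slices, and
# stitch the results back together. The helper name is illustrative; the real
# machinery above additionally handles nested inputs, pre-allocated outputs,
# and low-memory expansion.
def _chunked_apply(layer, x: torch.Tensor, no_batch_dims: int, chunk_size: int) -> torch.Tensor:
    batch_shape = x.shape[:no_batch_dims]
    flat = x.reshape(-1, *x.shape[no_batch_dims:])
    outs = [layer(flat[i : i + chunk_size]) for i in range(0, flat.shape[0], chunk_size)]
    out = torch.cat(outs, dim=0)
    return out.view(*batch_shape, *out.shape[1:])


if __name__ == "__main__":
    t = torch.randn(4, 8, 16)
    full = torch.nn.functional.relu(t)
    chunked = _chunked_apply(torch.nn.functional.relu, t, no_batch_dims=2, chunk_size=5)
    assert torch.allclose(full, chunked)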
| 6 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase__ = 4
lowerCAmelCase__ = 3
class __lowercase (__lowerCamelCase ):
pass
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
for shard in shards:
for i in range(lowerCamelCase_):
yield {"i": i, "shard": shard}
def __UpperCAmelCase ( ) -> Tuple:
UpperCamelCase__ : str = int(os.environ['RANK'])
UpperCamelCase__ : str = int(os.environ['WORLD_SIZE'])
UpperCamelCase__ : int = ArgumentParser()
parser.add_argument('--streaming' , type=lowerCamelCase_)
parser.add_argument('--local_rank' , type=lowerCamelCase_)
parser.add_argument('--num_workers' , type=lowerCamelCase_ , default=0)
UpperCamelCase__ : str = parser.parse_args()
UpperCamelCase__ : List[str] = args.streaming
UpperCamelCase__ : int = args.num_workers
UpperCamelCase__ : Tuple = {'shards': [f'shard_{shard_idx}' for shard_idx in range(lowerCamelCase_)]}
UpperCamelCase__ : int = IterableDataset.from_generator(lowerCamelCase_ , gen_kwargs=lowerCamelCase_)
if not streaming:
UpperCamelCase__ : List[Any] = Dataset.from_list(list(lowerCamelCase_))
UpperCamelCase__ : Union[str, Any] = split_dataset_by_node(lowerCamelCase_ , rank=lowerCamelCase_ , world_size=lowerCamelCase_)
UpperCamelCase__ : List[str] = torch.utils.data.DataLoader(lowerCamelCase_ , num_workers=lowerCamelCase_)
UpperCamelCase__ : Any = NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCamelCase__ : Any = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
UpperCamelCase__ : Dict = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}')
if __name__ == "__main__":
main()
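# This script reads RANK and WORLD_SIZE from the environment, so it is meant to
# be started by a distributed launcher. A plausible invocation with torchrun
# (process count and flag values are illustrative):
#
#   torchrun --nproc_per_node=3 this_script.py --streaming True --num_workers 2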
| 6 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
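# Outside of tests, the same processor pairs a tokenizer and an image processor
# behind a single call. A sketch against the public CLIP checkpoint (requires
# network access; kept as comments so the test module stays offline):
#
#   from transformers import CLIPProcessor
#   from PIL import Image
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
#   print(inputs.keys())  # input_ids, attention_mask, pixel_values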
| 6 | 1 |
'''simple docstring'''
import argparse
import datetime
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : int = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
UpperCamelCase__ : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(lowerCamelCase_) < 11:
raise ValueError('Must be 10 characters long')
# Get month
UpperCamelCase__ : int = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 13:
raise ValueError('Month must be between 1 - 12')
UpperCamelCase__ : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'')
# Get day
UpperCamelCase__ : int = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 32:
raise ValueError('Date must be between 1 - 31')
# Get second separator
UpperCamelCase__ : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'')
# Get year
UpperCamelCase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 45 < y < 8_500:
        raise ValueError('Year out of range: must be between 46 and 8499')
# Get datetime obj for validation
UpperCamelCase__ : int = datetime.date(int(lowerCamelCase_) , int(lowerCamelCase_) , int(lowerCamelCase_))
# Start math
if m <= 2:
UpperCamelCase__ : int = y - 1
UpperCamelCase__ : List[Any] = m + 12
# maths var
UpperCamelCase__ : int = int(str(lowerCamelCase_)[:2])
UpperCamelCase__ : int = int(str(lowerCamelCase_)[2:])
UpperCamelCase__ : int = int(2.6 * m - 5.39)
UpperCamelCase__ : int = int(c / 4)
UpperCamelCase__ : int = int(k / 4)
UpperCamelCase__ : int = int(d + k)
UpperCamelCase__ : int = int(t + u + v + x)
UpperCamelCase__ : int = int(z - (2 * c))
UpperCamelCase__ : int = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.')
# Response
UpperCamelCase__ : str = f'Your date {date_input}, is a {days[str(lowerCamelCase_)]}!'
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
lowerCAmelCase__ = parser.parse_args()
zeller(args.date_input)
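# Example run of the function above (called `zeller` at its CLI call site).
# January 1, 2000 fell on a Saturday:
#
#   >>> zeller('01-01-2000')
#   'Your date 01-01-2000, is a Saturday!'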
| 6 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_)
for k, v in tqdm(state_dict.items()):
if not isinstance(lowerCamelCase_ , torch.Tensor):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
UpperCamelCase__ : int = v.half()
if save_path is None: # overwrite src_path
UpperCamelCase__ : List[Any] = src_path
torch.save(lowerCamelCase_ , lowerCamelCase_)
if __name__ == "__main__":
fire.Fire(convert)
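# With python-fire, each function argument becomes a CLI flag. A sketch
# invocation (script and checkpoint names are illustrative):
#
#   python fp16_convert.py --src_path pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Omitting --save_path overwrites the source checkpoint in place, per the
# default handling above.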
| 6 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''encoder-decoder'''
_lowerCamelCase = True
def __init__( self : Tuple , **UpperCAmelCase_ : Tuple):
super().__init__(**UpperCAmelCase_)
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCamelCase__ : List[str] = kwargs.pop('encoder')
UpperCamelCase__ : Any = encoder_config.pop('model_type')
UpperCamelCase__ : Optional[int] = kwargs.pop('decoder')
UpperCamelCase__ : Union[str, Any] = decoder_config.pop('model_type')
from ..auto.configuration_auto import AutoConfig
UpperCamelCase__ : Dict = AutoConfig.for_model(UpperCAmelCase_ , **UpperCAmelCase_)
UpperCamelCase__ : List[Any] = AutoConfig.for_model(UpperCAmelCase_ , **UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = True
@classmethod
def __UpperCamelCase ( cls : Any , UpperCAmelCase_ : PretrainedConfig , UpperCAmelCase_ : PretrainedConfig , **UpperCAmelCase_ : Optional[int]):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Dict = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : str = copy.deepcopy(self.__dict__)
UpperCamelCase__ : Union[str, Any] = self.encoder.to_dict()
UpperCamelCase__ : Optional[int] = self.decoder.to_dict()
UpperCamelCase__ : Optional[int] = self.__class__.model_type
return output
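# A sketch of the intended composition pattern, using the standard transformers
# API that this config mirrors (the model choices are illustrative):
#
#   from transformers import BertConfig, GPT2Config, EncoderDecoderConfig
#
#   enc = BertConfig()
#   dec = GPT2Config()  # the classmethod marks it as a decoder with cross-attention
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert config.is_encoder_decoder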
| 6 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ):
super().__init__(**UpperCAmelCase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Any = num_encoder_blocks
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : int = sr_ratios
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : List[str] = patch_sizes
UpperCamelCase__ : Optional[int] = strides
UpperCamelCase__ : Dict = mlp_ratios
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = classifier_dropout_prob
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : int = layer_norm_eps
UpperCamelCase__ : Dict = decoder_hidden_size
UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_)
UpperCamelCase__ : List[str] = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
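# Instantiating the config with its defaults gives the SegFormer-B0 layout
# (four encoder blocks with widths 32/64/160/256, per the signature above). A
# sketch, assuming the published class name `SegformerConfig`:
#
#   config = SegformerConfig()
#   print(config.num_encoder_blocks, config.hidden_sizes)  # 4 [32, 64, 160, 256]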
| 6 | 1 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''OwlViTImageProcessor'''
_lowerCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Optional[int] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : List[Any]):
UpperCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase_ , )
UpperCamelCase__ : str = kwargs.pop('feature_extractor')
UpperCamelCase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def __call__( self : Tuple , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[Any]="max_length" , UpperCAmelCase_ : str="np" , **UpperCAmelCase_ : Optional[Any]):
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.')
if text is not None:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_) or (isinstance(UpperCAmelCase_ , UpperCAmelCase_) and not isinstance(text[0] , UpperCAmelCase_)):
UpperCamelCase__ : Union[str, Any] = [self.tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)]
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_) and isinstance(text[0] , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = []
# Maximum number of queries across batch
UpperCamelCase__ : Tuple = max([len(UpperCAmelCase_) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCAmelCase_) != max_num_queries:
UpperCamelCase__ : List[Any] = t + [' '] * (max_num_queries - len(UpperCAmelCase_))
UpperCamelCase__ : str = self.tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
encodings.append(UpperCAmelCase_)
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
if return_tensors == "np":
UpperCamelCase__ : List[str] = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0)
UpperCamelCase__ : List[str] = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCamelCase__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0)
UpperCamelCase__ : List[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCamelCase__ : Dict = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0)
UpperCamelCase__ : Union[str, Any] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCamelCase__ : List[str] = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0)
UpperCamelCase__ : Union[str, Any] = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0)
else:
raise ValueError('Target return tensor type could not be returned')
UpperCamelCase__ : Optional[Any] = BatchEncoding()
UpperCamelCase__ : str = input_ids
UpperCamelCase__ : Optional[Any] = attention_mask
if query_images is not None:
UpperCamelCase__ : Tuple = BatchEncoding()
UpperCamelCase__ : Tuple = self.image_processor(
UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_).pixel_values
UpperCamelCase__ : Union[str, Any] = query_pixel_values
if images is not None:
UpperCamelCase__ : List[str] = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
if text is not None and images is not None:
UpperCamelCase__ : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCamelCase__ : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
return self.image_processor.post_process(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any]):
return self.image_processor.post_process_object_detection(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int):
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_)
@property
def __UpperCamelCase ( self : str):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self : List[Any]):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase_ , )
return self.image_processor
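# A sketch of end-to-end usage against the public checkpoint (requires network
# access; the checkpoint name follows the published OwlViT release). Note the
# nested text list: one list of queries per image, padded to the longest set.
#
#   from transformers import OwlViTProcessor
#   from PIL import Image
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.new("RGB", (768, 768))
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   print(inputs.keys())  # input_ids, attention_mask, pixel_values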
| 6 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
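# Worked example of the comprehension above with character trigrams:
#
#   >>> ["hello"[i : i + 3] for i in range(len("hello") - 3 + 1)]
#   ['hel', 'ell', 'llo']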
| 6 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase (__lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = UnCLIPImageVariationPipeline
_lowerCamelCase = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''}
_lowerCamelCase = IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase = [
'''generator''',
'''return_dict''',
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
_lowerCamelCase = False
@property
def __UpperCamelCase ( self : Tuple):
return 32
@property
def __UpperCamelCase ( self : Tuple):
return 32
@property
def __UpperCamelCase ( self : int):
return self.time_input_dim
@property
def __UpperCamelCase ( self : Any):
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : List[Any]):
return 100
@property
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Dict = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
UpperCamelCase__ : int = UnCLIPTextProjModel(**UpperCAmelCase_)
return model
@property
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
UpperCamelCase__ : Optional[Any] = UNetaDConditionModel(**UpperCAmelCase_)
return model
@property
def __UpperCamelCase ( self : List[str]):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __UpperCamelCase ( self : List[Any]):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs)
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
torch.manual_seed(1)
UpperCamelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.dummy_decoder
UpperCamelCase__ : Dict = self.dummy_text_proj
UpperCamelCase__ : int = self.dummy_text_encoder
UpperCamelCase__ : Tuple = self.dummy_tokenizer
UpperCamelCase__ : Optional[Any] = self.dummy_super_res_first
UpperCamelCase__ : List[str] = self.dummy_super_res_last
UpperCamelCase__ : Optional[Any] = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
UpperCamelCase__ : Tuple = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
UpperCamelCase__ : List[Any] = CLIPImageProcessor(crop_size=32 , size=32)
UpperCamelCase__ : Optional[Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Dict=True):
UpperCamelCase__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
if str(UpperCAmelCase_).startswith('mps'):
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
else:
UpperCamelCase__ : str = torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
if pil_image:
UpperCamelCase__ : List[Any] = input_image * 0.5 + 0.5
UpperCamelCase__ : str = input_image.clamp(0 , 1)
UpperCamelCase__ : Optional[Any] = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
UpperCamelCase__ : int = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Any = 'cpu'
UpperCamelCase__ : Optional[Any] = self.get_dummy_components()
UpperCamelCase__ : Dict = self.pipeline_class(**UpperCAmelCase_)
UpperCamelCase__ : int = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Dict = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_)
UpperCamelCase__ : List[str] = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : int = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : int = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Tuple = 'cpu'
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : Any = self.pipeline_class(**UpperCAmelCase_)
UpperCamelCase__ : Any = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = pipe(**UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_)
UpperCamelCase__ : Dict = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : Dict = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : int = 'cpu'
UpperCamelCase__ : int = self.get_dummy_components()
UpperCamelCase__ : Tuple = self.pipeline_class(**UpperCAmelCase_)
UpperCamelCase__ : Dict = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Dict = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_)
UpperCamelCase__ : Dict = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = output.images
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_)
UpperCamelCase__ : List[str] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
UpperCamelCase__ : str = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
UpperCamelCase__ : Any = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = torch.device('cpu')
class __lowercase :
_lowerCamelCase = 1
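        # Minimal stand-in scheduler: `prepare_latents` only reads the scheduler's
        # `init_noise_sigma` (presumably what the masked attribute above is) to scale
        # the initial noise, so a bare value of 1 suffices here.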
UpperCamelCase__ : List[Any] = self.get_dummy_components()
UpperCamelCase__ : Dict = self.pipeline_class(**UpperCAmelCase_)
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Union[str, Any] = pipe.decoder.dtype
UpperCamelCase__ : Optional[int] = 1
UpperCamelCase__ : Optional[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
UpperCamelCase__ : Optional[int] = pipe.prepare_latents(
UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler())
UpperCamelCase__ : Any = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
UpperCamelCase__ : str = pipe.prepare_latents(
UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler())
UpperCamelCase__ : List[str] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = pipe(
**UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_).images
UpperCamelCase__ : int = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_)
# Don't pass image, instead pass embedding
UpperCamelCase__ : Dict = pipeline_inputs.pop('image')
UpperCamelCase__ : int = pipe.image_encoder(UpperCAmelCase_).image_embeds
UpperCamelCase__ : int = pipe(
**UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_ , image_embeddings=UpperCAmelCase_ , ).images
        # make sure passing image embeddings manually gives identical results
assert np.abs(img_out_a - img_out_a).max() < 1e-4
@skip_mps
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[int] = torch_device == 'cpu'
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
UpperCamelCase__ : Optional[Any] = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCAmelCase_ , expected_max_diff=UpperCAmelCase_)
@skip_mps
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = torch_device == 'cpu'
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Optional[int] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , )
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : List[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
UpperCamelCase__ : Tuple = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCAmelCase_)
@skip_mps
def __UpperCamelCase ( self : List[Any]):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __UpperCamelCase ( self : str):
return super().test_save_load_local()
@skip_mps
def __UpperCamelCase ( self : Optional[Any]):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png')
UpperCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy')
UpperCamelCase__ : Any = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa)
UpperCamelCase__ : Dict = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = torch.Generator(device='cpu').manual_seed(0)
UpperCamelCase__ : Optional[int] = pipeline(
UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ , 15)
| 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __UpperCAmelCase ( lowerCamelCase_) -> float:
return np.dot(lowerCamelCase_ , lowerCamelCase_)
class __lowercase :
def __init__( self : Tuple , *,
UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ):
UpperCamelCase__ : Union[str, Any] = regularization
UpperCamelCase__ : Optional[int] = gamma
if kernel == "linear":
UpperCamelCase__ : List[str] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
UpperCamelCase__ : Union[str, Any] = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCamelCase__ : Optional[int] = F'Unknown kernel: {kernel}'
raise ValueError(UpperCAmelCase_)
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray):
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
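    # RBF kernel above: k(x, y) = exp(-gamma * ||x - y||^2); a larger gamma shrinks
    # each training point's radius of influence.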
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray):
UpperCamelCase__ : Any = observations
UpperCamelCase__ : Tuple = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCamelCase__), ) : Optional[Any] = np.shape(UpperCAmelCase_)
def to_minimize(UpperCAmelCase_ : ndarray) -> float:
UpperCamelCase__ : Union[str, Any] = 0
((UpperCamelCase__), ) : int = np.shape(UpperCAmelCase_)
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(UpperCAmelCase_)
UpperCamelCase__ : List[str] = LinearConstraint(UpperCAmelCase_ , 0 , 0)
UpperCamelCase__ : Dict = Bounds(0 , self.regularization)
UpperCamelCase__ : Any = minimize(
UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x
UpperCamelCase__ : str = l_star
        # calculating mean offset of separation plane to points:
        # b ~= mean_n(y_n - sum_m(l_m * y_m * k(x_m, x_n)))
        UpperCamelCase__ : Any = 0
        for i in range(UpperCAmelCase_):
            s += classes[i]
            for j in range(UpperCAmelCase_):
                s -= classes[j] * self.optimum[j] * self.kernel(
                    observations[j] , observations[i])
        UpperCamelCase__ : List[str] = s / n
def __UpperCamelCase ( self : str , UpperCAmelCase_ : ndarray):
UpperCamelCase__ : Optional[int] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , UpperCAmelCase_)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
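# Illustrative usage (a sketch; the class and method names above are masked, so
# `SVC`, `fit` and `predict` are assumptions rather than the literal identifiers):
#     xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#           np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#     ys = np.asarray([1, 1, -1, -1])
#     svc = SVC(kernel='linear')
#     svc.fit(xs, ys)
#     svc.predict(np.asarray([0.0, 1.0]))  # expected: 1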
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCAmelCase__ = random.Random()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None) -> str:
if rng is None:
UpperCamelCase__ : List[Any] = global_rng
UpperCamelCase__ : str = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
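# The helper above fills a shape[0] x shape[1] nested list with floats in [0, scale),
# drawing from the module-level `global_rng` unless an explicit rng is passed.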
@require_torch
@require_torchaudio
class __lowercase (unittest.TestCase ):
def __init__( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Any=400 , UpperCAmelCase_ : List[str]=2_000 , UpperCAmelCase_ : int=24 , UpperCAmelCase_ : Dict=24 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[int]=16_000 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , ):
UpperCamelCase__ : Any = parent
UpperCamelCase__ : Optional[Any] = batch_size
UpperCamelCase__ : Any = min_seq_length
UpperCamelCase__ : Optional[Any] = max_seq_length
UpperCamelCase__ : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
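        # with the defaults, the input-preparation helper below yields lengths
        # 400, 666, ..., 1_996 across the batch of 7 ((2_000 - 400) // 6 == 266)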
UpperCamelCase__ : List[str] = feature_size
UpperCamelCase__ : Optional[int] = num_mel_bins
UpperCamelCase__ : Tuple = padding_value
UpperCamelCase__ : List[str] = sampling_rate
UpperCamelCase__ : Optional[Any] = return_attention_mask
UpperCamelCase__ : int = do_normalize
def __UpperCamelCase ( self : Tuple):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Any=False):
def _flatten(UpperCAmelCase_ : List[Any]):
return list(itertools.chain(*UpperCAmelCase_))
if equal_length:
UpperCamelCase__ : str = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
UpperCamelCase__ : Union[str, Any] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
UpperCamelCase__ : Optional[Any] = [np.asarray(UpperCAmelCase_) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase (__lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = SpeechaTextFeatureExtractor if is_speech_available() else None
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[Any] = SpeechaTextFeatureExtractionTester(self)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
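        # utterance-level normalization in the extractor should leave each feature
        # dimension with mean ~ 0 and variance ~ 1 across time frames (axis 0)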
self.assertTrue(np.all(np.mean(UpperCAmelCase_ , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ , axis=0) - 1) < 1e-3))
def __UpperCamelCase ( self : Optional[int]):
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : int = [np.asarray(UpperCAmelCase_) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ : Union[str, Any] = feature_extractor(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='np').input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
# Test not batched input
UpperCamelCase__ : str = feature_extractor(speech_inputs[0] , return_tensors='np').input_features
UpperCamelCase__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='np').input_features
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3))
# Test batched
UpperCamelCase__ : Tuple = feature_extractor(UpperCAmelCase_ , return_tensors='np').input_features
UpperCamelCase__ : Optional[Any] = feature_extractor(UpperCAmelCase_ , return_tensors='np').input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3))
# Test 2-D numpy arrays are batched.
UpperCamelCase__ : Optional[Any] = [floats_list((1, x))[0] for x in (800, 800, 800)]
UpperCamelCase__ : Dict = np.asarray(UpperCAmelCase_)
UpperCamelCase__ : str = feature_extractor(UpperCAmelCase_ , return_tensors='np').input_features
UpperCamelCase__ : Tuple = feature_extractor(UpperCAmelCase_ , return_tensors='np').input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3))
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : Union[str, Any] = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : Union[str, Any] = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase__ : Tuple = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Optional[Any] = feature_extractor(
UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_)
UpperCamelCase__ : List[str] = inputs.input_features
UpperCamelCase__ : int = inputs.attention_mask
UpperCamelCase__ : Optional[Any] = [np.sum(UpperCAmelCase_) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : str = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase__ : Union[str, Any] = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Dict = feature_extractor(
UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = inputs.input_features
UpperCamelCase__ : List[str] = inputs.attention_mask
UpperCamelCase__ : Dict = [np.sum(UpperCAmelCase_) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : Optional[Any] = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : int = feature_extractor(
UpperCAmelCase_ , padding='max_length' , max_length=4 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
UpperCamelCase__ : Tuple = inputs.input_features
UpperCamelCase__ : str = inputs.attention_mask
UpperCamelCase__ : Tuple = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1])
self._check_zero_mean_unit_variance(input_features[2])
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : str = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : int = feature_extractor(
UpperCAmelCase_ , padding='longest' , max_length=4 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
UpperCamelCase__ : int = inputs.input_features
UpperCamelCase__ : Any = inputs.attention_mask
UpperCamelCase__ : Optional[int] = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest and truncation is enabled -> truncate to max_length
self.assertEqual(input_features.shape , (3, 4, 24))
UpperCamelCase__ : Optional[Any] = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : Optional[Any] = feature_extractor(
UpperCAmelCase_ , padding='longest' , max_length=16 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = inputs.input_features
UpperCamelCase__ : int = inputs.attention_mask
UpperCamelCase__ : Tuple = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape , (3, 6, 24))
def __UpperCamelCase ( self : Any):
import torch
UpperCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : Dict = np.random.rand(100 , 32).astype(np.floataa)
UpperCamelCase__ : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ : List[str] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_features.dtype == np.floataa)
UpperCamelCase__ : int = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Tuple):
from datasets import load_dataset
UpperCamelCase__ : Tuple = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
# automatic decoding with librispeech
UpperCamelCase__ : List[str] = ds.sort('id').select(range(UpperCAmelCase_))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __UpperCamelCase ( self : List[Any]):
# fmt: off
UpperCamelCase__ : Any = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
])
# fmt: on
UpperCamelCase__ : List[str] = self._load_datasamples(1)
UpperCamelCase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : int = feature_extractor(UpperCAmelCase_ , return_tensors='pt').input_features
        self.assertEqual(input_features.shape , (1, 584, 24))
self.assertTrue(np.allclose(input_features[0, 0, :30] , UpperCAmelCase_ , atol=1e-4))
| 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Dict = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ : List[str] = 1_024
UpperCamelCase__ : List[str] = 4_096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
UpperCamelCase__ : List[str] = [5, 11, 17, 23]
UpperCamelCase__ : str = [256, 512, 1_024, 1_024]
UpperCamelCase__ : Union[str, Any] = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = 150
UpperCamelCase__ : int = 'huggingface/label-files'
UpperCamelCase__ : List[Any] = 'ade20k-id2label.json'
UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
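        # e.g. layer_idx 4 -> fusion_stage.layers.0 and layer_idx 1 -> fusion_stage.layers.3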
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
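        # in_proj_weight has shape (3 * hidden_size, hidden_size): rows [:h] hold the
        # query projection, rows [h:2h] the key projection and rows [2h:] the value projection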
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
if "ade" in checkpoint_url:
UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( ) -> list[list[int]]:
return [list(range(1_000 - i , -1_000 - i , -1)) for i in range(1_000)]
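# Row i counts down from 1_000 - i to -999 - i, so every row and every column is
# strictly decreasing -- the precondition the counting strategies below rely on.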
lowerCAmelCase__ = generate_large_matrix()
lowerCAmelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __UpperCAmelCase ( lowerCamelCase_) -> None:
assert all(row == sorted(lowerCamelCase_ , reverse=lowerCamelCase_) for row in grid)
assert all(list(lowerCamelCase_) == sorted(lowerCamelCase_ , reverse=lowerCamelCase_) for col in zip(*lowerCamelCase_))
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : List[str] = len(lowerCamelCase_) - 1
    # Edge cases: no values, or the first value is already negative (index 0).
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCamelCase__ : int = (left + right) // 2
UpperCamelCase__ : int = array[mid]
        # num is negative and the element just before it is non-negative -> first negative found.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCamelCase__ : Union[str, Any] = mid + 1
else:
UpperCamelCase__ : int = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowerCamelCase_)
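# e.g. find_negative_index([4, 3, 2, -1]) == 3 and find_negative_index([1, 0]) == 2
# (no negatives -> the length of the row is returned)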
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : Dict = 0
UpperCamelCase__ : Tuple = len(grid[0])
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : Dict = find_negative_index(grid[i][:bound])
total += bound
return (len(lowerCamelCase_) * len(grid[0])) - total
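# Runs in O(rows * log(cols)): `bound` can only shrink from row to row because the
# first negative index moves left (or stays) as the sorted rows decrease.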
def __UpperCAmelCase ( lowerCamelCase_) -> int:
return len([number for row in grid for number in row if number < 0])
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : List[str] = 0
for row in grid:
for i, number in enumerate(lowerCamelCase_):
if number < 0:
total += len(lowerCamelCase_) - i
break
return total
def __UpperCAmelCase ( ) -> None:
from timeit import timeit
print('Running benchmarks')
UpperCamelCase__ : Optional[int] = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCamelCase__ : Any = timeit(f'{func}(grid=grid)' , setup=lowerCamelCase_ , number=500)
print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
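        # e.g. with the defaults image_size=30, patch_size=2, mask_ratio=0.6:
        # num_patches = 15 ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91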
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
lowerCAmelCase__ = OrderedDict(
[
        # Model for Image classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
lowerCAmelCase__ = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
lowerCAmelCase__ = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
lowerCAmelCase__ = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
lowerCAmelCase__ = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModel)
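# Illustrative usage (a sketch): each auto class resolves the concrete architecture
# from the checkpoint's config type via the lazy mappings above, e.g.
#   FlaxAutoModel.from_pretrained('bert-base-cased')  # -> a FlaxBertModel instance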
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
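# Placeholder emitted when `torch`/`scipy` are missing: constructing the class or
# calling its classmethods raises a helpful error via `requires_backends` instead of
# an opaque ImportError at import time.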
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 1 |