# Dataset schema (recovered from the flattened table header): columns are
# `code` (string, 82 to 54.1k chars), `code_codestyle` (int64, 0-699),
# `style_context` (string, 111 to 35.6k chars), `style_context_codestyle`
# (int64, 0-699), and `label` (int64, 0 or 1). The Python samples below are
# the row contents; row boundaries are marked by divider comments.
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
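
# For reference, a minimal sketch (our own addition, not part of the snippet
# above) of the textbook Fisher-Yates shuffle: the variant above swaps two
# uniformly random positions on every pass, whereas the classical algorithm
# walks from the end and swaps each position with a uniformly chosen index at
# or before it, which is what yields uniformly distributed permutations.
def fisher_yates_shuffle_uniform(data: list) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i
        data[i], data[j] = data[j], data[i]
    return data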
# --- row metadata: 16 ---
from __future__ import annotations


class XORCipher:
    """Implements the XOR cipher: each character is XORed with an integer key."""

    def __init__(self, key: int = 0):
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt", key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt", key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))

# if crypt.encrypt_file("test.txt", key):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if crypt.decrypt_file("encrypt.out", key):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
# --- row metadata: 45 | 0 ---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
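
# Usage sketch (our own addition, assuming an installed `transformers`): the
# _LazyModule indirection makes importing the package cheap; the
# sentencepiece-backed submodule behind NllbTokenizer is only imported when
# the attribute is first accessed.
#
#     from transformers.models.nllb import NllbTokenizer  # triggers the lazy import
#     tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")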
# --- row metadata: 291 ---
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = a[left], a[pivot]  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
# --- row metadata: 45 | 0 ---
"""simple docstring"""
def a ( __snake_case : int, __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ :List[str] = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
UpperCAmelCase_ :Any = n - k
# Calculate C(n,k)
for i in range(lowercase__ ):
result *= n - i
result //= i + 1
return result
def a ( __snake_case : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count, lowercase__ ) // (node_count + 1)
def a ( __snake_case : int ):
'''simple docstring'''
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
UpperCAmelCase_ :Union[str, Any] = 1
for i in range(1, n + 1 ):
result *= i
return result
def a ( __snake_case : int ):
'''simple docstring'''
return catalan_number(lowercase__ ) * factorial(lowercase__ )
if __name__ == "__main__":
__lowerCamelCase = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
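
# Quick sanity check (our own addition): with 3 nodes there are
# C(6, 3) // 4 = 5 binary search trees and 5 * 3! = 30 binary trees.
assert binomial_coefficient(6, 3) == 20
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30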
# --- row metadata: 608 ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a Dinat model."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
# --- row metadata: 45 | 0 ---
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    # scale box coordinates to the 0-1000 range expected by LayoutLM-style models
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
# --- row metadata: 385 ---
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
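
# A short sketch (our own addition): NOR is functionally complete, so the
# other basic gates can be derived from nor_gate above.
def not_gate(a: int) -> int:
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))


def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))


assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]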
# --- row metadata: 45 | 0 ---
import unittest

from transformers import load_tool
from transformers.testing_utils import require_torch
from transformers.utils import is_torch_available

from .test_tools_common import ToolTesterMixin


if is_torch_available():
    import torch


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
# --- row metadata: 663 ---
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
# --- row metadata: 45 | 0 ---
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = """xglm"""
__UpperCamelCase = ["""past_key_values"""]
__UpperCamelCase = {
"""num_attention_heads""": """attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Any , lowercase_ : int=256008 , lowercase_ : str=2048 , lowercase_ : Dict=1024 , lowercase_ : Union[str, Any]=4096 , lowercase_ : Tuple=24 , lowercase_ : Optional[int]=16 , lowercase_ : str="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : int=0.0 , lowercase_ : Dict=0.0 , lowercase_ : Dict=0.02 , lowercase_ : str=True , lowercase_ : Dict=True , lowercase_ : List[str]=2 , lowercase_ : Optional[int]=1 , lowercase_ : Tuple=0 , lowercase_ : Dict=2 , **lowercase_ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[Any] = d_model
SCREAMING_SNAKE_CASE_ : int = ffn_dim
SCREAMING_SNAKE_CASE_ : List[str] = num_layers
SCREAMING_SNAKE_CASE_ : List[Any] = attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = activation_function
SCREAMING_SNAKE_CASE_ : Tuple = dropout
SCREAMING_SNAKE_CASE_ : str = attention_dropout
SCREAMING_SNAKE_CASE_ : Optional[Any] = activation_dropout
SCREAMING_SNAKE_CASE_ : List[Any] = layerdrop
SCREAMING_SNAKE_CASE_ : Optional[Any] = init_std
SCREAMING_SNAKE_CASE_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE_ : str = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
# --- row metadata: 512 ---
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
# --- row metadata: 45 | 0 ---
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# --- row metadata: 479 ---
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        FlaubertConfig,
        TFFlaubertForMultipleChoice,
        TFFlaubertForQuestionAnsweringSimple,
        TFFlaubertForSequenceClassification,
        TFFlaubertForTokenClassification,
        TFFlaubertModel,
        TFFlaubertWithLMHeadModel,
    )


class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict


@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
# --- row metadata: 45 | 0 ---
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case( UpperCAmelCase ):
__snake_case: str = """new-model"""
if is_tf_available():
class _snake_case( UpperCAmelCase ):
__snake_case: Any = NewModelConfig
@require_tf
class _snake_case( unittest.TestCase ):
@slow
def _UpperCamelCase (self : str ) -> Any:
"""simple docstring"""
A__ = """bert-base-cased"""
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase (self : List[Any] ) -> Any:
"""simple docstring"""
A__ = """bert-base-cased"""
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
A__ = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase (self : Any ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase (self : Any ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
A__ = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase (self : str ) -> int:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase (self : Dict ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase (self : str ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_probability
def _UpperCamelCase (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase (self : int ) -> Tuple:
"""simple docstring"""
A__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 1_44_10 )
def _UpperCamelCase (self : str ) -> Tuple:
"""simple docstring"""
A__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 1_44_10 )
def _UpperCamelCase (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
A__ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
A__ = copy.deepcopy(model.config )
A__ = ["""FunnelBaseModel"""]
A__ = TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
A__ = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, NewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, NewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _UpperCamelCase (self : List[str] ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , 'bert-base is not a local folder and is not a valid model identifier' ):
A__ = TFAutoModel.from_pretrained('bert-base' )
def _UpperCamelCase (self : int ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
A__ = TFAutoModel.from_pretrained(lowerCamelCase__ , revision='aaaaaa' )
def _UpperCamelCase (self : Optional[int] ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
A__ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def _UpperCamelCase (self : Tuple ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(lowerCamelCase__ , 'Use `from_pt=True` to load this model' ):
A__ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def _UpperCamelCase (self : Optional[int] ) -> str:
"""simple docstring"""
A__ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
A__ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
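    # Note (illustrative, not part of the original test file): the RequestCounter
    # assertions above encode the caching contract of from_pretrained — with a warm
    # local cache, reloading a checkpoint should cost exactly one HEAD request (the
    # freshness check) and zero GET requests, for both single-file and sharded
    # checkpoints.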
| 531 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        second_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=second_prompt,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=second_prompt,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
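# Minimal usage sketch of the three VersatileDiffusion entry points exercised above
# (checkpoint name taken from the tests; keyword arguments assumed to match them):
#
#     pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#     txt = pipe.text_to_image(prompt="a red panda", num_inference_steps=50).images
#     var = pipe.image_variation(init_image, generator=generator).images
#     mix = pipe.dual_guided(prompt="a red panda", image=init_image, text_to_image_strength=0.75).images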
| 45 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
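# Illustrative sketch (not part of the original file): what the _LazyModule pattern
# above buys you. Importing the package stays cheap because the torch-backed module
# is only imported on first attribute access. Module path assumed; in recent
# transformers releases MMBT lives under models.deprecated:
#
#     import transformers.models.mmbt as mmbt   # no torch import happens here
#     mmbt.MMBTConfig                           # first access triggers the real import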
| 561 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: swap coordinates so that x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly computes the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
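# Note on the hand-built inputs above (illustrative, not part of the original test):
# LayoutLMv3 expects one (x0, y0, x1, y1) bounding box per text token, normalized to
# a 0-1000 page scale, so a token covering the top-left quarter of a page would
# carry roughly:
#
#     bbox = tf.constant([[[0, 0, 500, 500]]])   # shape (batch, seq_len, 4)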
| 45 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
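    # Note (illustrative, not part of the original test): the check above relies on
    # the KV-cache contract — feeding only the new tokens together with
    # past_key_values must reproduce, within atol=1e-3, the hidden states that a
    # full forward pass over the concatenated sequence produces.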
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
| 71 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """simple docstring"""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
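# Illustrative shape check (assumption: 224x224 RGB inputs, batch of two examples):
# collate_fn([{"pixel_values": torch.rand(3, 224, 224)}, {"pixel_values": torch.rand(3, 224, 224)}])
# returns {"pixel_values": tensor of shape (2, 3, 224, 224)}, which is the batched
# input that ViTMAEForPreTraining consumes.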
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE training transforms to every image in the batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
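    # Worked example of the linear scaling rule above (illustrative numbers):
    # with base_learning_rate=1e-3, per-device batch size 64, gradient accumulation 4
    # and world_size 2, total_train_batch_size = 64 * 4 * 2 = 512, so the absolute
    # learning rate becomes 1e-3 * 512 / 256 = 2e-3.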
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 45 | 0 |
def interpolation_search(sorted_collection, item):
    """simple docstring"""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
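# Worked probe example (illustrative): searching for 50 in [10, 30, 40, 45, 50, 66, 77, 93]
# gives point = 0 + (50 - 10) * (7 - 0) // (93 - 10) = 280 // 83 = 3; sorted_collection[3]
# is 45 < 50, so the next iteration continues with left = point + 1 = 4, where the probe
# lands on 50 directly.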
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """simple docstring"""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )
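# Minimal usage sketch for the recursive variant above (bounds passed explicitly):
#
#     data = [10, 30, 40, 45, 50, 66, 77, 93]
#     interpolation_search_by_recursion(data, 66, 0, len(data) - 1)  # returns 5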
def __assert_sorted(collection):
    """simple docstring"""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(F'''{target} found at positions: {result}''')
    else:
        print('Not found')
| 556 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 100_0000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
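# Quick sanity checks (illustrative, not part of the original file): prime_sieve(30)
# should yield [2, 3, 5, 7, 11, 13, 17, 19, 23, 29], and solution() is expected to
# return 997651 — the widely published answer to Project Euler 50, the sum of 543
# consecutive primes starting at 7. Verify locally before relying on it.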
| 45 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "longformer"
    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : "PretrainedConfig" , __lowerCamelCase : str = "default" , __lowerCamelCase : "List[PatchingSpec]" = None ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
SCREAMING_SNAKE_CASE = True
@property
def _snake_case ( self : Optional[Any] ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE = {0: """batch"""}
return outputs
@property
def _snake_case ( self : List[Any] ):
return 1e-4
@property
def _snake_case ( self : Tuple ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def _snake_case ( self : List[Any] , __lowerCamelCase : "PreTrainedTokenizerBase" , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(
preprocessor=lowerCamelCase__ , batch_size=lowerCamelCase__ , seq_length=lowerCamelCase__ , is_pair=lowerCamelCase__ , framework=lowerCamelCase__ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE = 1
return inputs
| 16 |
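For orientation, a config like the one above takes either a single sliding-window size or one per layer; a small illustrative snippet (usage assumed, mirroring the classes as reconstructed above, runnable only inside transformers):

config = LongformerConfig(attention_window=[256] * 12, num_hidden_layers=12)
# a list gives one local-attention window per layer; an int applies to all layers
assert len(config.attention_window) == config.num_hidden_layers

onnx_config = LongformerOnnxConfig(config)
print(list(onnx_config.inputs))  # input_ids, attention_mask, global_attention_mask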
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def __a ( self :int ):
UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase__ :Union[str, Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = []
for prompt in prompts:
UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 )
UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
| 45 | 0 |
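The helpers ids_tensor and random_attention_mask used in the tester above come from the shared test utilities; minimal sketches of what they produce (reconstructions under assumptions, not the library's exact code):

import torch


def ids_tensor(shape, vocab_size):
    # Random integer token ids in [0, vocab_size).
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)


def random_attention_mask(shape):
    # Random 0/1 mask; one position per row is forced to 1 so no row is
    # entirely masked (the library helper makes a similar guarantee).
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, -1] = 1
    return mask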
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow C++ log verbosity
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 291 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 45 | 0 |
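Beyond the confusion matrix, main() could also print a plain hold-out accuracy; a sketch using the names defined above (the accuracy_score import is an addition, to be placed inside main()):

from sklearn.metrics import accuracy_score

predictions = xgboost_classifier.predict(x_test)
print(f"hold-out accuracy: {accuracy_score(y_test, predictions):.3f}")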
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 608 |
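A round-trip sketch for the HillCipher above. The 2x2 key is arbitrary; any key whose determinant is coprime with 36 passes check_determinant, and decryption returns the padded, uppercased alphanumeric form rather than the exact input:

key = numpy.array([[2, 5], [1, 6]])  # det = 7, coprime with 36
cipher = HillCipher(key)
encrypted = cipher.encrypt("testing hill cipher")
print(encrypted)
print(cipher.decrypt(encrypted))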
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 45 | 0 |
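The script imports PretokenizationArguments from a sibling arguments.py that this excerpt does not include. A hypothetical sketch of such a dataclass (field names follow the attributes used above; the defaults are assumptions, not the real file):

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class PretokenizationArguments:
    # Hypothetical defaults; only the field names are dictated by the script.
    tokenizer_dir: str = field(default="codeparrot/codeparrot")
    dataset_name: str = field(default="codeparrot/codeparrot-clean-train")
    tokenized_data_repo: str = field(default="tokenized-codeparrot-train")
    num_workers: Optional[int] = field(default=None)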
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : str = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
_a : List[str] = GPTSwaTokenizer
_a : Dict = False
_a : str = True
_a : int = False
def lowercase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : int =GPTSwaTokenizer(lowerCamelCase__ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self , a ):
snake_case__ : List[Any] ="""This is a test"""
snake_case__ : Optional[Any] ="""This is a test"""
return input_text, output_text
def lowercase__ ( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowercase__ ( self ):
snake_case__ : Tuple =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowerCamelCase__ ) , 2_0_0_0 )
def lowercase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def lowercase__ ( self ):
snake_case__ : Union[str, Any] =GPTSwaTokenizer(lowerCamelCase__ )
snake_case__ : Optional[int] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
snake_case__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
lowerCamelCase__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
snake_case__ : Tuple =tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
snake_case__ : Optional[Any] =tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
# fmt: off
self.assertListEqual(
lowerCamelCase__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def lowercase__ ( self ):
snake_case__ : Dict =GPTSwaTokenizer(lowerCamelCase__ )
snake_case__ : List[str] =["""This is a test""", """I was born in 92000, and this is falsé."""]
snake_case__ : Union[str, Any] =[
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertListEqual(tokenizer.encode_fast(lowerCamelCase__ ) , lowerCamelCase__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(tokenizer.decode_fast(lowerCamelCase__ ) , lowerCamelCase__ )
@slow
def lowercase__ ( self ):
snake_case__ : Tuple =[
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
snake_case__ : str ={"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=lowerCamelCase__ , )
| 385 |
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)

        # Recursively sort first 2/3 elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 45 | 0 |
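Stooge sort's recurrence T(n) = 3T(2n/3) + O(1) gives a running time of O(n^(log 3 / log 1.5)) which is roughly O(n^2.71), worse than bubble sort. A randomized sanity check against the built-in sort (test sketch, using the names fixed above):

import random

data = [random.randint(-100, 100) for _ in range(25)]
assert stooge_sort(list(data)) == sorted(data)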
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes `data` into base64 (RFC 4648)."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes base64-encoded `encoded_data` back into bytes."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 663 |
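A round-trip check against the standard library, using the function names fixed above (test sketch):

import base64

sample = b"Algorithms with a twist!"
assert base64_encode(sample) == base64.b64encode(sample)
assert base64_decode(base64.b64encode(sample)) == sample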
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 45 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 512 |
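A usage sketch for a processor like the one above. The checkpoint name and the 48 kHz rate are assumptions taken from common CLAP usage, not from this row:

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # assumed checkpoint
audio = np.random.randn(48_000).astype("float32")  # one second at an assumed 48 kHz
inputs = processor(
    text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
print(inputs.keys())  # input_ids, attention_mask, input_features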
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 45 | 0 |
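Note the expectation formula above is a heuristic: it scales the relative frequency by the observed count. The textbook Pearson statistic instead uses expected = relative frequency x message length; a compact version for contrast (illustrative sketch, not equivalent to the scoring above):

def chi_squared_score(decoded: str, frequencies: dict[str, float]) -> float:
    # Textbook Pearson chi-squared of observed vs. expected letter counts.
    letters = [c for c in decoded.lower() if c in frequencies]
    n = len(letters)
    score = 0.0
    for letter, rel_freq in frequencies.items():
        observed = letters.count(letter)
        expected = rel_freq * n
        if expected:
            score += (observed - expected) ** 2 / expected
    return score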
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
"""simple docstring"""
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="__start__" , UpperCAmelCase="__end__" , UpperCAmelCase="__unk__" , UpperCAmelCase="__null__" , **UpperCAmelCase , ):
super().__init__(unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , **lowerCamelCase__ )
with open(lowerCamelCase__ , encoding="""utf-8""" ) as vocab_handle:
__lowerCamelCase = json.load(lowerCamelCase__ )
__lowerCamelCase = {v: k for k, v in self.encoder.items()}
with open(lowerCamelCase__ , encoding="""utf-8""" ) as merges_handle:
__lowerCamelCase = merges_handle.read().split("""\n""" )[1:-1]
__lowerCamelCase = [tuple(merge.split() ) for merge in merges]
__lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__lowerCamelCase = {}
@property
def lowerCamelCase_ ( self ):
return len(self.encoder )
def lowerCamelCase_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self , UpperCAmelCase ):
if token in self.cache:
return self.cache[token]
__lowerCamelCase = re.sub("""([.,!?()])""" , r""" \1""" , lowerCamelCase__ )
__lowerCamelCase = re.sub("""(')""" , r""" \1 """ , lowerCamelCase__ )
__lowerCamelCase = re.sub(r"""\s{2,}""" , """ """ , lowerCamelCase__ )
if "\n" in token:
__lowerCamelCase = token.replace("""\n""" , """ __newln__""" )
__lowerCamelCase = token.split(""" """ )
__lowerCamelCase = []
for token in tokens:
if not len(lowerCamelCase__ ):
continue
__lowerCamelCase = token.lower()
__lowerCamelCase = tuple(lowerCamelCase__ )
__lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
__lowerCamelCase = get_pairs(lowerCamelCase__ )
if not pairs:
words.append(lowerCamelCase__ )
continue
while True:
__lowerCamelCase = min(lowerCamelCase__ , key=lambda UpperCAmelCase : self.bpe_ranks.get(lowerCamelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCamelCase = bigram
__lowerCamelCase = []
__lowerCamelCase = 0
while i < len(lowerCamelCase__ ):
try:
__lowerCamelCase = word.index(lowerCamelCase__ , lowerCamelCase__ )
new_word.extend(word[i:j] )
__lowerCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCamelCase = tuple(lowerCamelCase__ )
__lowerCamelCase = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
__lowerCamelCase = get_pairs(lowerCamelCase__ )
__lowerCamelCase = """@@ """.join(lowerCamelCase__ )
__lowerCamelCase = word[:-4]
__lowerCamelCase = word
words.append(lowerCamelCase__ )
return " ".join(lowerCamelCase__ )
def lowerCamelCase_ ( self , UpperCAmelCase ):
__lowerCamelCase = []
__lowerCamelCase = re.findall(r"""\S+\n?""" , lowerCamelCase__ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCamelCase__ ).split(""" """ ) ) )
return split_tokens
def lowerCamelCase_ ( self , UpperCAmelCase ):
__lowerCamelCase = token.lower()
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self , UpperCAmelCase ):
return self.decoder.get(lowerCamelCase__ , self.unk_token )
def lowerCamelCase_ ( self , UpperCAmelCase ):
__lowerCamelCase = """ """.join(lowerCamelCase__ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + """\n""" )
__lowerCamelCase = 0
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
__lowerCamelCase = token_index
writer.write(""" """.join(lowerCamelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
| 479 |
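get_pairs feeds the BPE merge loop above: each iteration merges the lowest-ranked adjacent pair until no known merge remains. A tiny demonstration of the helper on a symbol tuple:

word = ("h", "e", "l", "l", "o</w>")
print(get_pairs(word))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}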
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 45 | 0 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('The following activities are selected:')

    # The first activity is always selected
    i = 0
    print(i, end=',')

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = [1, 3, 0, 5, 8, 5]
lowerCAmelCase_ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 531 |
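The greedy argument requires activities ordered by finish time, which the inputs above already are. A variant that returns the selection and handles unsorted input by sorting first (sketch):

def max_activities(start: list[int], finish: list[int]) -> list[int]:
    # Greedy: visit activities by finish time; take each one that starts
    # no earlier than the previously selected activity ends.
    order = sorted(range(len(finish)), key=lambda k: finish[k])
    selected, last_finish = [], float("-inf")
    for k in order:
        if start[k] >= last_finish:
            selected.append(k)
            last_finish = finish[k]
    return selected


print(max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]))  # [0, 1, 3, 4]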
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 45 | 0 |
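The core pattern these tests exercise is registering a custom config and feature extractor with the auto classes; a minimal sketch (the register methods are the real transformers API, while the custom classes and the checkpoint path are stand-ins):

from transformers import AutoConfig, AutoFeatureExtractor

AutoConfig.register("custom", CustomConfig)  # "custom" must match CustomConfig.model_type
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)

# Afterwards the auto-API resolves the custom classes like any built-in ones.
extractor = AutoFeatureExtractor.from_pretrained("path/to/custom-checkpoint")  # hypothetical path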
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 561 |
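A small worked example with the config above: the ViT patch grid determines the sequence length seen by the encoder (runnable only inside transformers; the arithmetic itself is generic):

config = ViTMSNConfig(image_size=224, patch_size=16)
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)  # 196 patches per image at these defaults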
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
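# Added hedged usage sketch (illustrative only; assumes a d4rl-style env plus
# pretrained 1D UNet and value-function checkpoints, which are not shown above):
#
# import gym
# env = gym.make("hopper-medium-v2")
# pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
# obs = env.reset()
# action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)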
| 45 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
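# Added hedged note: constructing the shim emits a FutureWarning and then behaves
# exactly like MobileViTImageProcessor.
# import warnings
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     MobileViTFeatureExtractor()
#     assert any("deprecated" in str(w.message) for w in caught)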
| 71 |
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
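# Added sanity checks (hedged illustration of the reversal-based test above):
assert is_palindrome(121) is True
assert is_palindrome(-121) is False
assert is_palindrome(10) is False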
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 0 |
from PIL import Image
def change_brightness(img: Image, level: float):
    """simple docstring"""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
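# Added hedged variant: the same helper darkens with a negative level, e.g.
#     with Image.open('image_data/lena.jpg') as img:
#         change_brightness(img, -100).save('image_data/lena_darker.png', format='png')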
| 556 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
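# Added checks (hedged): the set-based uniqueness test above in action.
assert A([1, 2, 3]) is True
assert A([1, 2, 1]) is False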
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__A : str = None
__A : Dict = logging.get_logger(__name__)
__A : Optional[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__A : Tuple = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
__A : List[Any] = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
__A : List[Any] = '▁'
class _SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
lowerCamelCase__ = BarthezTokenizer
def __init__( self : Union[str, Any] , __lowerCamelCase : str=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : List[Any]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : int="<mask>" , **__lowerCamelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
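# Added hedged sketch: BARThez pairs follow the RoBERTa-style layout built above,
# i.e. <s> A </s></s> B </s> (class and repo names below are the upstream ones,
# assumed rather than taken from this obfuscated row):
# tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")  # network access assumed
# ids = tok.build_inputs_with_special_tokens([10, 11], [12])
# assert ids == [tok.cls_token_id, 10, 11, tok.sep_token_id, tok.sep_token_id, 12, tok.sep_token_id]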
| 16 |
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ):
UpperCamelCase__ :List[str] = key
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :List[str] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :Optional[int] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowercase_ = logging.getLogger(__name__)
lowercase_ = 'pytorch_model.bin'
@dataclasses.dataclass
class __lowerCAmelCase :
_a = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
_a = dataclasses.field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class __lowerCAmelCase :
_a = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
_a = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
_a = dataclasses.field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """A csv or a json file containing the validation data."""} )
_a = dataclasses.field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """The name of the task to train on."""} , )
_a = dataclasses.field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class __lowerCAmelCase :
_a = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
_a = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
_a = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
_a = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
_a = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
_a = dataclasses.field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
_a = dataclasses.field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
_a = dataclasses.field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
_a = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
_a = dataclasses.field(
default=100 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
_a = dataclasses.field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Random seed for initialization."""} , )
def a ( A__ : List[Any] , A__ : Tuple , A__ : int , A__ : Optional[int] , A__ : Dict , A__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
_lowercase =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_lowercase =dataset.filter(lambda A__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_lowercase =int(eval_result * len(lowercase__ ) )
print(lowercase__ )
_lowercase =dataset.sort('probability' , reverse=lowercase__ )
_lowercase =dataset.select(range(lowercase__ ) )
_lowercase =dataset.remove_columns(['label', 'probability'] )
_lowercase =dataset.rename_column('prediction' , 'label' )
_lowercase =dataset.map(lambda A__ : {"label": idalabel[example["label"]]} )
_lowercase =dataset.shuffle(seed=args.seed )
_lowercase =os.path.join(lowercase__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(lowercase__ , index=lowercase__ )
else:
dataset.to_json(lowercase__ )
def a ( A__ : Tuple , A__ : Optional[Any] , A__ : Optional[Any] , A__ : Dict , **A__ : Dict ) -> Tuple:
"""simple docstring"""
_lowercase =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_lowercase =STModelArguments(model_name_or_path=lowercase__ )
_lowercase =STDataArguments(train_file=lowercase__ , infer_file=lowercase__ )
_lowercase =STTrainingArguments(output_dir=lowercase__ )
_lowercase =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowercase__ ).items():
setattr(lowercase__ , lowercase__ , lowercase__ )
for key, value in kwargs.items():
if hasattr(lowercase__ , lowercase__ ):
setattr(lowercase__ , lowercase__ , lowercase__ )
# Sanity checks
_lowercase ={}
_lowercase =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_lowercase =args.train_file
_lowercase =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_lowercase =args.eval_file
for key in data_files:
_lowercase =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
_lowercase =extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
_lowercase =F'''{args.output_dir}/self-train_iter-{{}}'''.format
_lowercase =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowercase__ )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
accelerator.wait_for_everyone()
_lowercase =None
_lowercase =None
_lowercase =0
_lowercase =False
# Show the progress bar
_lowercase =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_lowercase =data_dir_format(lowercase__ )
assert os.path.exists(lowercase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_lowercase =os.path.join(lowercase__ , 'stage-1' )
_lowercase ={
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowercase__ , lowercase__ ):
arguments_dict.update({key: value} )
_lowercase =os.path.join(lowercase__ , 'best-checkpoint' , lowercase__ )
if os.path.exists(lowercase__ ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , lowercase__ , lowercase__ , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , lowercase__ )
finetune(**lowercase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase__ )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , lowercase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_lowercase =os.path.join(lowercase__ , 'best-checkpoint' )
_lowercase =os.path.join(lowercase__ , 'stage-2' )
# Update arguments_dict
_lowercase =model_path
_lowercase =data_files["""train"""]
_lowercase =current_output_dir
_lowercase =os.path.join(lowercase__ , 'best-checkpoint' , lowercase__ )
if os.path.exists(lowercase__ ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , lowercase__ , lowercase__ , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , lowercase__ )
finetune(**lowercase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase__ )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , lowercase__ )
_lowercase =iteration
_lowercase =data_dir_format(iteration + 1 )
_lowercase =AutoConfig.from_pretrained(os.path.join(lowercase__ , 'best-checkpoint' ) )
_lowercase =config.idalabel
_lowercase =os.path.join(lowercase__ , 'eval_results_best-checkpoint.json' )
_lowercase =os.path.join(lowercase__ , 'test_results_best-checkpoint.json' )
assert os.path.exists(lowercase__ )
with open(lowercase__ , 'r' ) as f:
_lowercase =float(json.load(lowercase__ )[args.eval_metric] )
_lowercase =os.path.join(lowercase__ , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(lowercase__ )
# Loading the dataset from local csv or json files.
_lowercase =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )["""data"""]
_lowercase =load_dataset('csv' , data_files={'data': infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(lowercase__ , exist_ok=lowercase__ )
shutil.copy(lowercase__ , os.path.join(lowercase__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowercase__ ):
shutil.copy(lowercase__ , os.path.join(lowercase__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.wait_for_everyone()
_lowercase =os.path.join(lowercase__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_lowercase =eval_result
if best_iteration is None:
_lowercase =new_iteration
_lowercase =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_lowercase =new_iteration
_lowercase =new_eval_result
_lowercase =0
else:
if new_eval_result == best_eval_result:
_lowercase =new_iteration
_lowercase =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_lowercase =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , lowercase__ )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowercase__ , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowercase__ , 'eval_results_best-iteration.json' ) , )
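# Added hedged usage sketch: the self-training driver above (obfuscated as `a`)
# appears to take (model_name_or_path, train_file, infer_file, output_dir, **kwargs);
# all paths below are illustrative assumptions.
#
# a(
#     "bert-base-uncased",
#     "data/train.csv",
#     "data/infer.csv",
#     "outputs",
#     eval_file="data/eval.csv",
#     evaluation_strategy="epoch",
# )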
| 291 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
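# Added sanity check (hedged) for the randomized quicksort above:
_sample = [3, 1, 2, 5, 4]
quick_sort_random(_sample, 0, len(_sample))
assert _sample == [1, 2, 3, 4, 5]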
if __name__ == "__main__":
main()
| 45 | 0 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__lowerCamelCase = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__lowerCamelCase = logging.get_logger(__name__)
class _snake_case ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase__ ="""maskformer"""
UpperCamelCase__ ={"""hidden_size""": """mask_feature_size"""}
UpperCamelCase__ =["""resnet""", """swin"""]
UpperCamelCase__ =["""detr"""]
def __init__( self : Any , snake_case : int = 256 , snake_case : int = 256 , snake_case : float = 0.1 , snake_case : bool = False , snake_case : Optional[Dict] = None , snake_case : Optional[Dict] = None , snake_case : float = 0.02 , snake_case : float = 1.0 , snake_case : float = 1.0 , snake_case : float = 1.0 , snake_case : float = 20.0 , snake_case : Optional[bool] = None , **snake_case : str , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase_ :Optional[Any] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase_ :List[Any] = backbone_config.pop('''model_type''' )
UpperCAmelCase_ :Any = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ :Optional[int] = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase_ :Tuple = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase_ :Optional[Any] = (
decoder_config.pop('''model_type''' ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase_ :Any = CONFIG_MAPPING[decoder_type]
UpperCAmelCase_ :Optional[Any] = config_class.from_dict(lowerCamelCase__ )
UpperCAmelCase_ :Tuple = backbone_config
UpperCAmelCase_ :Optional[int] = decoder_config
# main feature dimension for the model
UpperCAmelCase_ :Union[str, Any] = fpn_feature_size
UpperCAmelCase_ :int = mask_feature_size
# initializer
UpperCAmelCase_ :Union[str, Any] = init_std
UpperCAmelCase_ :Tuple = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase_ :Dict = cross_entropy_weight
UpperCAmelCase_ :int = dice_weight
UpperCAmelCase_ :Optional[Any] = mask_weight
UpperCAmelCase_ :int = use_auxiliary_loss
UpperCAmelCase_ :List[str] = no_object_weight
UpperCAmelCase_ :Any = output_auxiliary_logits
UpperCAmelCase_ :Union[str, Any] = self.decoder_config.encoder_attention_heads
UpperCAmelCase_ :Any = self.decoder_config.num_hidden_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def snake_case_ ( cls : Tuple , snake_case : PretrainedConfig , snake_case : PretrainedConfig , **snake_case : List[Any] ):
return cls(
backbone_config=lowerCamelCase__ , decoder_config=lowerCamelCase__ , **lowerCamelCase__ , )
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ :Dict = self.backbone_config.to_dict()
UpperCAmelCase_ :str = self.decoder_config.to_dict()
UpperCAmelCase_ :Optional[int] = self.__class__.model_type
return output
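# Added hedged usage sketch (upstream this class is MaskFormerConfig):
# from transformers import MaskFormerConfig
# cfg = MaskFormerConfig()     # defaults fall back to a Swin backbone + DETR decoder
# serialized = cfg.to_dict()   # nested backbone/decoder configs serialize as dicts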
| 608 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16],
                 kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
                 mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5,
                 layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
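# Added hedged example: hidden_size is derived in the constructor above as
# embed_dim * 2 ** (num_stages - 1).
# cfg = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
# assert cfg.hidden_size == 64 * 2 ** 3  # == 512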
| 45 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__lowerCamelCase : List[Any] = """bert-base-cased"""
__lowerCamelCase : Dict = """fp16"""
__lowerCamelCase : List[Any] = """bf16"""
__lowerCamelCase : List[Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class _lowercase ( AccelerateTestCase ):
def lowercase__ ( self ):
super().setUp()
snake_case__ : str =dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def lowercase__ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowerCamelCase__ ):
snake_case__ : Optional[int] =self.dist_env.copy()
snake_case__ : List[Any] =F"{i + 1}"
snake_case__ : List[Any] =strategy
with mockenv_context(**lowerCamelCase__ ):
snake_case__ : Tuple =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def lowercase__ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowerCamelCase__ ):
snake_case__ : Optional[int] =self.dist_env.copy()
snake_case__ : Optional[int] =prefetch_policy
with mockenv_context(**lowerCamelCase__ ):
snake_case__ : Dict =FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def lowercase__ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowerCamelCase__ ):
snake_case__ : Optional[int] =self.dist_env.copy()
snake_case__ : Tuple =state_dict_type
with mockenv_context(**lowerCamelCase__ ):
snake_case__ : List[str] =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def lowercase__ ( self ):
snake_case__ : List[Any] =AutoModel.from_pretrained(lowerCamelCase__ )
for policy in FSDP_AUTO_WRAP_POLICY:
snake_case__ : Optional[int] =self.dist_env.copy()
snake_case__ : int =policy
if policy == "TRANSFORMER_BASED_WRAP":
snake_case__ : Optional[Any] ="""BertLayer"""
elif policy == "SIZE_BASED_WRAP":
snake_case__ : Union[str, Any] ="""2000"""
with mockenv_context(**lowerCamelCase__ ):
snake_case__ : int =FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
snake_case__ : Optional[int] =self.dist_env.copy()
snake_case__ : str ="""TRANSFORMER_BASED_WRAP"""
snake_case__ : Union[str, Any] ="""T5Layer"""
with mockenv_context(**lowerCamelCase__ ):
snake_case__ : Any =FullyShardedDataParallelPlugin()
with self.assertRaises(lowerCamelCase__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
snake_case__ : Dict =self.dist_env.copy()
snake_case__ : int ="""SIZE_BASED_WRAP"""
snake_case__ : Union[str, Any] ="""0"""
with mockenv_context(**lowerCamelCase__ ):
snake_case__ : Optional[Any] =FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def lowercase__ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
snake_case__ : Dict =self.dist_env.copy()
snake_case__ : Dict =mp_dtype
with mockenv_context(**lowerCamelCase__ ):
snake_case__ : Optional[Any] =Accelerator()
if mp_dtype == "fp16":
snake_case__ : Tuple =torch.floataa
elif mp_dtype == "bf16":
snake_case__ : Tuple =torch.bfloataa
snake_case__ : int =MixedPrecision(param_dtype=lowerCamelCase__ , reduce_dtype=lowerCamelCase__ , buffer_dtype=lowerCamelCase__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowerCamelCase__ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , lowerCamelCase__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowerCamelCase__ )
def lowercase__ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
snake_case__ : List[str] =self.dist_env.copy()
snake_case__ : Dict =str(lowerCamelCase__ ).lower()
with mockenv_context(**lowerCamelCase__ ):
snake_case__ : List[str] =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowerCamelCase__ ) )
@require_fsdp
@require_multi_gpu
@slow
class _lowercase ( TempDirTestCase ):
def lowercase__ ( self ):
super().setUp()
snake_case__ : str =0.82
snake_case__ : int =[
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
snake_case__ : int ={
"""multi_gpu_fp16""": 3_2_0_0,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2_0_0_0,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1_9_0_0,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
snake_case__ : Optional[Any] =1_6_0
snake_case__ : List[str] =1_6_0
snake_case__ : Union[str, Any] =inspect.getfile(accelerate.test_utils )
snake_case__ : Dict =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def lowercase__ ( self ):
snake_case__ : int =os.path.join(self.test_scripts_folder , """test_performance.py""" )
snake_case__ : List[str] =["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
snake_case__ : Optional[Any] =cmd.copy()
for i, strategy in enumerate(lowerCamelCase__ ):
if strategy.lower() in config:
cmd_config.append(F"--fsdp_sharding_strategy={i+1}" )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"--fsdp_auto_wrap_policy={policy}" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F"--output_dir={self.tmpdir}",
F"--performance_lower_bound={self.performance_lower_bound}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
def lowercase__ ( self ):
snake_case__ : List[Any] =os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
snake_case__ : Any =[
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(lowerCamelCase__ ):
snake_case__ : Optional[Any] =cmd.copy()
cmd_config.append(F"--fsdp_sharding_strategy={i+1}" )
if strategy != "FULL_SHARD":
continue
snake_case__ : Optional[int] =len(lowerCamelCase__ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
snake_case__ : Tuple =cmd_config[:state_dict_config_index]
cmd_config.append(F"--fsdp_state_dict_type={state_dict_type}" )
cmd_config.extend(
[
self.test_file_path,
F"--output_dir={self.tmpdir}",
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
snake_case__ : List[Any] =cmd_config[:-1]
snake_case__ : Tuple =os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
F"--resume_from_checkpoint={resume_from_checkpoint}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
def lowercase__ ( self ):
snake_case__ : List[str] =os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
snake_case__ : Optional[int] =[
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
snake_case__ : Optional[int] =cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(lowerCamelCase__ ):
if strategy.lower() in spec:
cmd_config.append(F"--fsdp_sharding_strategy={i+1}" )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"--fsdp_auto_wrap_policy={policy}" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F"--output_dir={self.tmpdir}",
F"--peak_memory_upper_bound={peak_mem_upper_bound}",
F"--n_train={self.n_train}",
F"--n_val={self.n_val}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
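# Added hedged sketch: the CLI shape these slow tests assemble and launch, using
# flags that appear verbatim in the code above (script/output paths illustrative):
#
# accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 --use_fsdp \
#     --fsdp_sharding_strategy=1 --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#     --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#     test_performance.py --output_dir=out --performance_lower_bound=0.82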
| 385 |
def nor_gate(input_a: int, input_b: int) -> int:
    return int(input_a == input_b == 0)
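# Added check (hedged): NOR is the negation of OR over all four input pairs.
assert all(nor_gate(a, b) == int(not (a or b)) for a in (0, 1) for b in (0, 1))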
def main() -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 45 | 0 |
class EditDistance:
    """simple docstring"""

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]
    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """simple docstring"""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """simple docstring"""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
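# Added check (hedged): the classic example -- "kitten" -> "sitting" takes 3 edits.
assert EditDistance().min_dist_bottom_up("kitten", "sitting") == 3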
if __name__ == "__main__":
_lowerCamelCase : List[Any] = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
_lowerCamelCase : Optional[int] = input('''Enter the first string: ''').strip()
_lowerCamelCase : Any = input('''Enter the second string: ''').strip()
print()
print(f"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}")
print(f"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}")
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 663 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
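# Added hedged sketch of the invariant the three tests above assert: the processor
# resizes so both spatial dims come out as multiples of `size_divisor`.
# processor = GLPNImageProcessor(size_divisor=32)
# out = processor(images=Image.new("RGB", (100, 75)), return_tensors="pt").pixel_values
# assert out.shape[-1] % 32 == 0 and out.shape[-2] % 32 == 0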
| 45 | 0 |
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = """Usage of script: script_name <size_of_canvas:int>"""
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """simple docstring"""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """simple docstring"""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """simple docstring"""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """simple docstring"""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
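# Added hedged headless smoke test (no matplotlib window required):
_board = create_canvas(10)
seed(_board)
_board = run(_board)
assert len(_board) == 10 and len(_board[0]) == 10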
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 512 |
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("""This should never happen""")
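# Added sanity check (hedged): y*log10(x) preserves the ordering of x**y.
assert (res(2, 10) > res(3, 5)) == (2**10 > 3**5)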
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 45 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase_ ( PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
A = KandinskyInpaintPipeline
A = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
A = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
A = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
A = False
@property
def lowerCamelCase_ ( self ):
return 3_2
@property
def lowerCamelCase_ ( self ):
return 3_2
@property
def lowerCamelCase_ ( self ):
return self.time_input_dim
@property
def lowerCamelCase_ ( self ):
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self ):
return 1_0_0
@property
def lowerCamelCase_ ( self ):
__lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
return text_encoder
@property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
        # create mask
        mask = np.ones((6_4, 6_4) , dtype=np.float32 )
        mask[:3_2, :3_2] = 0
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
__lowerCamelCase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_inpaint( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
        mask[:2_5_0, 2_5_0:-2_5_0] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
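# A standalone sketch (ours, not part of the test suite) of the device-aware
# seeding pattern used in get_dummy_inputs above: torch.Generator could not be
# constructed on the "mps" backend when these tests were written, so MPS runs
# fall back to seeding the global RNG instead.
def _make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # seeds the global RNG, returns a Generator
    return torch.Generator(device=device).manual_seed(seed)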
| 479 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester :
"""simple docstring"""
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = """last"""
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        model = TFFlaubertModel(config=config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        model = TFFlaubertForSequenceClassification(config )
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self ):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model( self ):
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
        input_ids = tf.convert_to_tensor(
            [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 5_12) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.876_8773, -1.56_6555, 0.2707_2418],
                    [-1.692_0038, -0.587_3505, 1.932_9599],
                    [-2.956_3985, -1.699_3835, 1.797_2052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
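# A minimal sketch (ours, not part of the test suite) of the input format the
# testers above build: Flaubert-style TF models can take explicit per-row
# `lengths` in place of an attention mask, plus per-token language IDs in `langs`.
if is_tf_available():
    def _demo_flaubert_inputs():
        input_ids = tf.constant([[0, 5, 7, 9, 1, 2, 2, 2]])  # 2 = hypothetical pad id
        lengths = tf.constant([5])  # number of real tokens in each row
        langs = tf.zeros_like(input_ids)  # single-language batch
        return {"input_ids": input_ids, "lengths": lengths, "langs": langs}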
| 45 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    '''simple docstring'''
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_keys(s_dict ):
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("""transformer_layers""" ,"""layers""" )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace("""subsample""" ,"""conv""" )] = s_dict.pop(key )
def make_linear_from_emb(emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path ,pytorch_dump_folder_path ):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path ,map_location='cpu' )
    args = mam_aaa["""args"""]
    state_dict = mam_aaa["""model"""]
    lm_head_weights = state_dict["""decoder.output_projection.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(',' )]
    config = SpeechaTextConfig(
        vocab_size=vocab_size ,max_source_positions=args.max_source_positions ,max_target_positions=args.max_target_positions ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function='relu' ,num_conv_layers=len(conv_kernel_sizes ) ,conv_channels=args.conv_channels ,conv_kernel_sizes=conv_kernel_sizes ,input_feat_per_channel=args.input_feat_per_channel ,input_channels=args.input_channels ,tie_word_embeddings=tie_embeds ,num_beams=5 ,max_length=200 ,use_cache=True ,decoder_start_token_id=2 ,early_stopping=True ,)
    model = SpeechaTextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict ,strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
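# Example invocation (the script name and paths below are placeholders, not
# taken from the original file):
#   python convert_speech_to_text_fairseq_checkpoint.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted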
| 531 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class VersatileDiffusionMegaPipelineFastTests( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_from_save_pretrained( self ):
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt="""first prompt""" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
            generator = generator.manual_seed(0 )
            new_image = pipe.dual_guided(
                prompt="""first prompt""" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image( self ):
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """cyberpunk 2077"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        image = pipe.image_variation(init_image , generator=generator , output_type="""numpy""" ).images
        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
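# The slice checks above are cheap regression tests: instead of pinning every
# pixel, they pin a small corner slice and compare within a loose tolerance.
# A generic helper in the same spirit (ours, illustrative):
def _assert_slice_close(image, expected_slice, tol=1e-1):
    image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < tol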
| 45 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''nielsr/canine-s''': 2_0_4_8,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0XE_000
SEP = 0XE_001
BOS = 0XE_002
MASK = 0XE_003
RESERVED = 0XE_004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: '''[CLS]''',
SEP: '''[SEP]''',
BOS: '''[BOS]''',
MASK: '''[MASK]''',
PAD: '''[PAD]''',
RESERVED: '''[RESERVED]''',
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ) -> int:
        return self._unicode_vocab_size
    def _tokenize( self , text: str ) -> List[str]:
        return list(text )
    def _convert_token_to_id( self , token: str ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F'invalid token: \'{token}\'' )
    def _convert_id_to_token( self , index: int ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F'invalid id: {index}' )
    def convert_tokens_to_string( self , tokens ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        return ()
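# A minimal round-trip sketch (ours, not part of the tokenizer module): CANINE
# tokenizes at the raw codepoint level, so encoding a string is just `ord` per
# character plus the special [CLS]/[SEP] codepoints defined above.
if __name__ == "__main__":
    tok = CanineTokenizer()
    ids = tok.build_inputs_with_special_tokens([ord(ch) for ch in "hi"])
    assert ids == [CLS, ord("h"), ord("i"), SEP]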
| 561 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=10_00 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({"""pixel_values""": pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["""labels"""] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["""start_positions"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict["""end_positions"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict["""labels"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict["""labels"""] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
    def setUp( self ):
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_loss_computation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , """hf_compute_loss""" , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop("""input_ids""" )
                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop("""input_ids""" )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["""labels"""].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -1_00
                        prepared_for_class["""labels"""] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: """input_ids"""}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model( self ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_model_various_embeddings( self ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_for_sequence_classification( self ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    def test_for_token_classification( self ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
    def test_for_question_answering( self ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head( self ):
        model = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""tf""" ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 1_99, 7_68)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
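# A vectorized alternative (ours, illustrative) to the per-element loop in
# prepare_config_and_inputs above: sorting each (y1, y2) and (x1, x2) pair with
# numpy enforces the same "legal bbox" invariant without Python-level loops.
def _legalize_bboxes(bbox):
    # bbox: int array of shape (batch, seq_len, 4) with columns (x1, y1, x2, y2)
    bbox[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)
    bbox[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)
    return bbox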
| 45 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list , train_usr: list , train_mtch: list , test_dt: list , test_mtch: list ) -> float:
    """simple docstring"""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
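# The closed-form fit above is the ordinary least squares normal equation,
# beta = (X^T X)^-1 X^T y, evaluated directly with numpy. A tiny illustrative
# check (ours, not from the original module): the points (1, 2), (2, 4), (3, 6)
# with match counts [3, 1, 2] fit exactly to beta = [0, 2, 0], so predicting
# day 4 with 2 matches yields |0 + 4*2 + 2 + 0| = 10:
#   >>> round(float(linear_regression_prediction([1, 2, 3], [2, 4, 6], [3, 1, 2], [4], [2])), 6)
#   10.0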
def sarimax_predictor(train_user: list , train_match: list , test_match: list ) -> float:
    """simple docstring"""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=6_00 , method="nm" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def support_vector_regressor(x_train: list , x_test: list , train_user: list ) -> float:
    """simple docstring"""
    regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def interquartile_range_checker(train_user: list ) -> float:
    """simple docstring"""
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1 )
    return low_lim
def data_safety_checker(list_vote: list , actual_result: float ) -> bool:
    """simple docstring"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["""total_user""", """total_even""", """days"""]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = """""" if data_safety_checker(res_vote, tst_user) else """not """
    print(F"""Today's data is {not_str}safe.""")
| 71 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."} )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
Lambda(lambda lowercase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
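    # e.g. a batch {"image": [<PIL.Image>, ...]} gains a "pixel_values" list of
    # transformed tensors; the crop size comes from the image processor above.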
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
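    # Worked example (illustrative numbers, not script defaults): per-device
    # batch 32 with gradient accumulation 4 on 2 processes gives a total batch
    # of 256, so absolute_lr = base_lr * 256 / 256 = base_lr.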
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 45 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
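# Example of the renaming above on a hypothetical fairseq key (expert_idx=7):
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#   -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"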
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
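# Shards are first written with a "-of-???.bin" placeholder and renamed once
# the final count is known; e.g. 128 expert shards plus one shared shard yields
# files like "pytorch_model-00001-of-00129.bin" (illustrative name).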
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 556 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
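# Worked example of the search above: below a ceiling of 100 the longest run of
# consecutive primes whose sum is itself prime is 2 + 3 + 5 + 7 + 11 + 13 = 41,
# so solution(100) should return 41.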
| 45 | 0 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup, )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
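# Hypothetical trace of Brian Kernighan's trick for 25 (0b11001):
# 25 & 24 -> 0b11000, 24 & 23 -> 0b10000, 16 & 15 -> 0; three iterations, one
# per set bit, versus five shift-and-test steps in the modulo version.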
| 16 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[int] = True
return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 45 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
_lowercase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCamelCase__ , 'num_attention_heads' ) )
class __lowerCAmelCase :
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_a = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_a = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =LevitModelTester(self )
_lowercase =ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def A__ ( self ) -> int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A__ ( self ) -> str:
'''simple docstring'''
pass
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> List[str]:
'''simple docstring'''
_lowercase =super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_lowercase =model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_lowercase =self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_lowercase =model(**lowerCamelCase__ ).loss
loss.backward()
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowercase =False
_lowercase =True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_lowercase =model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
_lowercase =self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_lowercase =model(**lowerCamelCase__ ).loss
loss.backward()
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =[
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
_lowercase =problem_type["""title"""]
_lowercase =problem_type["""num_labels"""]
_lowercase =model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_lowercase =self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if problem_type["num_labels"] > 1:
_lowercase =inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
_lowercase =inputs["""labels"""].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list:
_lowercase =model(**lowerCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =LevitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def a ( ) -> str:
"""simple docstring"""
_lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> int:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase =LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase__ )
_lowercase =self.default_image_processor
_lowercase =prepare_img()
_lowercase =image_processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowercase =model(**lowerCamelCase__ )
# verify the logits
_lowercase =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowercase =torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 291 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
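# Minimal usage sketch (assumes an installed xgboost; variable names follow
# main() below):
# classifier = xgboost(x_train, y_train)
# predictions = classifier.predict(x_test)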
def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 45 | 0 |
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
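# Illustrative call (hypothetical webhook URL); Slack is expected to reply with
# HTTP 200 when the message is accepted:
# send_slack_message("deploy finished", "https://hooks.slack.com/services/T000/B000/XXXX")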
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 608 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 45 | 0 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
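# Usage sketch with hypothetical environment variables:
#   LOCAL_RANK=2 -> get_int_from_env(["LOCAL_RANK", "MPI_LOCALRANKID"], 0) == 2
#   ACCELERATE_DEBUG_MODE=1 -> parse_flag_from_env("ACCELERATE_DEBUG_MODE") is True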
| 385 |
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3)
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
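# Hypothetical trace on [8, 2, 6]: the ends are swapped to give [6, 2, 8], then
# with t = 1 the calls sort positions 0-1 ([6, 2] -> [2, 6]), positions 1-2
# (already ordered), and positions 0-1 again, yielding [2, 6, 8].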
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 45 | 0 |
from __future__ import annotations
import time
import numpy as np
_lowerCamelCase : Optional[int] = [8, 5, 9, 7]
_lowerCamelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCamelCase : Dict = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm deadlock-avoidance simulation."""

    def __init__(
        self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]], ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'''Process {process_number + 1} is executing.''')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f'''P{self.__allocated_resources_table.index(item) + 1}'''
                + " ".join(f'''{it:>8}''' for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f'''P{self.__maximum_claim_table.index(item) + 1}'''
                + " ".join(f'''{it:>8}''' for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
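# Illustrative driver (names as defined above; any keyword set to True, e.g.
# describe=True, prints the pretty-formatted tables before the safety check):
# BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)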
| 663 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 45 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Blank out the single differing bit, or return False when the strings
    differ in more than one position."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def _A (__a ) -> list[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = []
while True:
SCREAMING_SNAKE_CASE_ : Tuple = ["""$"""] * len(lowercase__ )
SCREAMING_SNAKE_CASE_ : Tuple = []
for i in range(len(lowercase__ ) ):
for j in range(i + 1 , len(lowercase__ ) ):
SCREAMING_SNAKE_CASE_ : str = compare_string(binary[i] , binary[j] )
if k is False:
SCREAMING_SNAKE_CASE_ : str = """*"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """*"""
temp.append('''X''' )
for i in range(len(lowercase__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowercase__ ) == 0:
return pi
SCREAMING_SNAKE_CASE_ : Tuple = list(set(lowercase__ ) )
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one prime implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily cover the remaining minterms with the implicant covering the most columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
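# Worked example (illustrative, not part of the original script): with 3
# variables and minterms {1, 5, 7}:
#   decimal_to_binary(3, [1, 5, 7])  -> ['001', '101', '111']
#   check(['001', '101', '111'])     -> ['_01', '1_1']   (order may vary)
# and selection() keeps both, i.e. B'C and A.C are the essential implicants.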
| 512 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 45 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowercase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index: int):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
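# Example invocation (hypothetical data paths), using the dataclass flags above:
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --labels ./labels.txt --output_dir ./ner-out --do_train --do_eval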
| 479 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
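# Usage note (illustrative): instantiating MobileViTFeatureExtractor() now emits a
# FutureWarning and otherwise behaves exactly like MobileViTImageProcessor.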
| 45 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
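# Note on the pattern above (explanatory, not part of the original file): the first
# attribute access, e.g. `from transformers.models.bert import BertModel`, makes
# _LazyModule import modeling_bert on demand, so heavy backends (torch/tf/flax)
# are only loaded when a symbol from them is actually used.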
| 46 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Project Euler 29: count distinct terms of a**b for 2 <= a, b <= n."""
    collect_powers = set()
    upper_limit = n + 1  # maximum limit
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
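# Sanity check from the Project Euler 29 statement: for 2 <= a, b <= 5 there are
# 15 distinct terms, so solution(5) == 15.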
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
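# To run only the slow integration test above (the file path is an assumption):
#   RUN_SLOW=1 python -m pytest tests/models/deit/test_modeling_tf_deit.py -k inference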
| 46 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # The state dimensions must split evenly into attention heads of the given width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 vocabulary."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
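# Quick arithmetic check for the TrunkConfig validation above (illustrative):
# with the defaults, sequence_state_dim=1024 and sequence_head_width=32 give
# 1024 // 32 == 32 heads, and 32 * 32 == 1024, so __post_init__ passes.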
| 46 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json")
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token)
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token)
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},)
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
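# These push-to-hub tests are gated behind the staging decorator; a typical
# (assumed) way to run them locally:
#   HUGGINGFACE_CO_STAGING=1 python -m pytest tests/test_feature_extraction_utils.py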
| 46 |
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
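# Examples (illustrative): dna("ATCG") -> "TAGC", dna("GTA") -> "CAT";
# a character outside {A, T, C, G} raises ValueError("Invalid Strand").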
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements (sliding window)."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
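    # Deterministic example (illustrative):
    #   max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], k=4) == 24, from the window [3, 1, 0, 20].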
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the DINO checkpoint's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
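    # Example invocation (hypothetical script and output paths), matching the flags above:
    #   python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16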
| 46 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : str = val
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCamelCase : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_lowerCamelCase : str = value
else:
_lowerCamelCase : Any = value
return new_state_dict
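# e.g. a key like "backbone.0.body.layer1.0.conv1.weight" becomes
# "backbone.conv_encoder.model.layer1.0.conv1.weight" after this pass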
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : str = ""
if is_panoptic:
_lowerCamelCase : Any = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCamelCase : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : int = in_proj_weight[:256, :]
_lowerCamelCase : Dict = in_proj_bias[:256]
_lowerCamelCase : int = in_proj_weight[256:512, :]
_lowerCamelCase : Tuple = in_proj_bias[256:512]
_lowerCamelCase : Tuple = in_proj_weight[-256:, :]
_lowerCamelCase : Optional[int] = in_proj_bias[-256:]
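        # note: conditional DETR uses a hidden size of 256, so each fused in_proj_weight has
        # shape (768, 256); the three 256-row slices above are the query, key and value
        # projections, in that order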
def lowerCamelCase_( ) -> int:
'''simple docstring'''
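    # the standard COCO val2017 test image (two cats on a couch), commonly used in
    # conversion scripts to verify model outputs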
_lowerCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_lowerCamelCase : Optional[int] = "resnet101"
if "dc5" in model_name:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[int] = "panoptic" in model_name
if is_panoptic:
_lowerCamelCase : Optional[int] = 250
else:
_lowerCamelCase : Optional[Any] = 91
_lowerCamelCase : List[Any] = "huggingface/label-files"
_lowerCamelCase : List[Any] = "coco-detection-id2label.json"
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
    _lowerCamelCase : str = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase : Any = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
# load image processor
_lowerCamelCase : Any = "coco_panoptic" if is_panoptic else "coco_detection"
_lowerCamelCase : Tuple = ConditionalDetrImageProcessor(format=_lowerCamelCase )
# prepare image
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : str = image_processor(images=_lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = encoding["pixel_values"]
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
_lowerCamelCase : int = torch.hub.load("DeppMeng/ConditionalDETR" , _lowerCamelCase , pretrained=_lowerCamelCase ).eval()
_lowerCamelCase : Optional[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_lowerCamelCase : Tuple = "conditional_detr." + src
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : int = rename_backbone_keys(_lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCamelCase , is_panoptic=_lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase : Tuple = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_lowerCamelCase : List[Any] = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCamelCase : List[str] = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Tuple = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Optional[int] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Dict = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase : Any = ConditionalDetrForSegmentation(_lowerCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
model.push_to_hub(repo_id=_lowerCamelCase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_lowerCamelCase : List[Any] = conditional_detr(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = model(_lowerCamelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_lowerCamelCase ))
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
    _lowerCamelCase : Any = np.max(_lowerCamelCase , axis=-1 , keepdims=_lowerCamelCase )
    _lowerCamelCase : Dict = np.exp(_lowerCamelCase - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowerCamelCase )
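# quick numeric sanity check for the two helpers above (illustrative only):
#   softmax(np.array([[1.0, 2.0, 3.0]])) ~= [[0.0900, 0.2447, 0.6652]], each row summing to 1
#   sigmoid(np.array([0.0])) == [0.5]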
class A_ ( _a ):
lowerCAmelCase__ = 'sigmoid'
lowerCAmelCase__ = 'softmax'
lowerCAmelCase__ = 'none'
@add_end_docstrings(
_a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class A_ ( _a ):
lowerCAmelCase__ = False
lowerCAmelCase__ = ClassificationFunction.NONE
def __init__( self: str ,**__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: List[Any]="" ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = tokenizer_kwargs
_lowerCamelCase : Optional[int] = {}
if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
_lowerCamelCase : Tuple = self.model.config.return_all_scores
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or top_k is None:
_lowerCamelCase : List[str] = top_k
_lowerCamelCase : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,__lowerCAmelCase ,)
if return_all_scores:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : Union[str, Any] = 1
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowerCamelCase : Dict = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self: int ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = super().__call__(*__lowerCAmelCase ,**__lowerCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowerCamelCase : Optional[Any] = "top_k" not in kwargs
if isinstance(args[0] ,__lowerCAmelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = self.framework
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.tokenizer(**__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] ,__lowerCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: int ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return self.model(**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: Dict=True ):
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowerCamelCase : Dict = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowerCamelCase : List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None:
_lowerCamelCase : Optional[int] = self.model.config.function_to_apply
else:
_lowerCamelCase : str = ClassificationFunction.NONE
_lowerCamelCase : List[Any] = model_outputs["logits"][0]
_lowerCamelCase : Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowerCamelCase : str = sigmoid(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowerCamelCase : Optional[int] = softmax(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.NONE:
_lowerCamelCase : str = outputs
else:
raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowerCamelCase : Optional[int] = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__lowerCAmelCase )
]
if not _legacy:
            dict_scores.sort(key=lambda __lowerCAmelCase : __lowerCAmelCase["score"] ,reverse=__lowerCAmelCase )
if top_k is not None:
_lowerCamelCase : Any = dict_scores[:top_k]
return dict_scores
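# A minimal usage sketch (assumes the standard `pipeline` factory; the default model that
# gets loaded, and hence the exact scores, may differ between transformers versions):
#
#   from transformers import pipeline
#
#   classifier = pipeline("text-classification")
#   classifier("This movie was great!")
#   # -> [{"label": "POSITIVE", "score": 0.99...}]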
| 46 | 1 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : int = TapasConfig.from_json_file(_lowerCamelCase )
# set absolute/relative position embeddings parameter
_lowerCamelCase : int = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_lowerCamelCase : Tuple = TapasForQuestionAnswering(config=_lowerCamelCase )
elif task == "WTQ":
# run_task_main.py hparams
_lowerCamelCase : List[Any] = 4
_lowerCamelCase : List[str] = True
# hparam_utils.py hparams
_lowerCamelCase : Tuple = 0.6_6_4_6_9_4
_lowerCamelCase : List[str] = 0.2_0_7_9_5_1
_lowerCamelCase : Optional[int] = 0.1_2_1_1_9_4
_lowerCamelCase : int = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : List[str] = False
_lowerCamelCase : int = 0.0_3_5_2_5_1_3
_lowerCamelCase : List[str] = TapasForQuestionAnswering(config=_lowerCamelCase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_lowerCamelCase : Tuple = 4
_lowerCamelCase : Union[str, Any] = False
# hparam_utils.py hparams
_lowerCamelCase : Dict = 3_6.4_5_1_9
_lowerCamelCase : Dict = 0.9_0_3_4_2_1
_lowerCamelCase : Any = 2_2_2.0_8_8
_lowerCamelCase : int = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : str = True
_lowerCamelCase : Optional[int] = 0.7_6_3_1_4_1
_lowerCamelCase : Optional[Any] = TapasForQuestionAnswering(config=_lowerCamelCase )
elif task == "TABFACT":
_lowerCamelCase : Optional[int] = TapasForSequenceClassification(config=_lowerCamelCase )
elif task == "MLM":
_lowerCamelCase : Optional[int] = TapasForMaskedLM(config=_lowerCamelCase )
elif task == "INTERMEDIATE_PRETRAINING":
_lowerCamelCase : List[str] = TapasModel(config=_lowerCamelCase )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowerCamelCase )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
_lowerCamelCase : Union[str, Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(_lowerCamelCase )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCAmelCase : Tuple = '''\
Text data.
Second line of data.'''
_lowerCAmelCase : str = '''file'''
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCamelCase : List[str] = bytes(_lowerCamelCase , "utf-8" )
with zstd.open(_lowerCamelCase , "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowerCamelCase ) , "w" ) as f:
f.write(_lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Tuple = str(Path(_lowerCamelCase ).resolve() )
assert cached_path(_lowerCamelCase ) == text_file
# relative path
_lowerCamelCase : Optional[int] = str(Path(_lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowerCamelCase ) == text_file
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : str = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
# relative path
_lowerCamelCase : List[Any] = "./__missing_file__.txt"
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46 | 1 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class A_ :
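    # CLIP's stock image processor operates on PIL images and breaks the autograd graph;
    # this variant reimplements resize / center-crop / normalize with torchvision ops so
    # gradients can flow back to the VQGAN latent being optimized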
def __init__( self: List[str] ,__lowerCAmelCase: str = "cpu" ,__lowerCAmelCase: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_lowerCamelCase : List[str] = device
_lowerCamelCase : Dict = CLIPTokenizerFast.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
_lowerCamelCase : List[Any] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
_lowerCamelCase : Tuple = torchvision.transforms.Normalize(self.image_mean ,self.image_std )
_lowerCamelCase : Optional[Any] = torchvision.transforms.Resize(224 )
_lowerCamelCase : List[Any] = torchvision.transforms.CenterCrop(224 )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.resize(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.center_crop(__lowerCAmelCase )
_lowerCamelCase : List[str] = self.normalize(__lowerCAmelCase )
return images
def __call__( self: Optional[int] ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: List[Any]=None ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.tokenizer(text=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : Any = self.preprocess_img(__lowerCAmelCase )
_lowerCamelCase : List[Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class A_ ( nn.Module ):
def __init__( self: Tuple ,__lowerCAmelCase: Any=10 ,__lowerCAmelCase: List[Any]=0.01 ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Union[str, Any]="image" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: int=False ,__lowerCAmelCase: str=False ,__lowerCAmelCase: Any=False ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : Any = device if device else get_device()
if vqgan:
_lowerCamelCase : Any = vqgan
else:
_lowerCamelCase : List[str] = load_vqgan(self.device ,conf_path=__lowerCAmelCase ,ckpt_path=__lowerCAmelCase )
self.vqgan.eval()
if clip:
_lowerCamelCase : Any = clip
else:
_lowerCamelCase : List[Any] = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
_lowerCamelCase : Optional[int] = ProcessorGradientFlow(device=self.device )
_lowerCamelCase : int = iterations
_lowerCamelCase : Tuple = lr
_lowerCamelCase : Any = log
_lowerCamelCase : Dict = make_grid
_lowerCamelCase : Optional[int] = return_val
_lowerCamelCase : Union[str, Any] = quantize
_lowerCamelCase : List[str] = self.vqgan.decoder.z_shape
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Dict=5 ,__lowerCAmelCase: List[Any]=True ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = []
if output_path is None:
_lowerCamelCase : Optional[Any] = "./animation.gif"
if input_path is None:
_lowerCamelCase : Optional[Any] = self.save_path
_lowerCamelCase : Dict = sorted(glob(input_path + "/*" ) )
if not len(__lowerCAmelCase ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(__lowerCAmelCase ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
_lowerCamelCase : Any = total_duration / len(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [frame_duration] * len(__lowerCAmelCase )
if extend_frames:
_lowerCamelCase : Dict = 1.5
_lowerCamelCase : List[Any] = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(__lowerCAmelCase ) )
imageio.mimsave(__lowerCAmelCase ,__lowerCAmelCase ,duration=__lowerCAmelCase )
print(F"""gif saved to {output_path}""" )
def _lowercase ( self: str ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
_lowerCamelCase : Tuple = preprocess(Image.open(__lowerCAmelCase ) ,target_image_size=256 ).to(self.device )
_lowerCamelCase : str = preprocess_vqgan(__lowerCAmelCase )
_lowerCamelCase, *_lowerCamelCase : List[str] = self.vqgan.encode(__lowerCAmelCase )
return z
def _lowercase ( self: Tuple ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.latent.detach().requires_grad_()
_lowerCamelCase : str = base_latent + transform_vector
if self.quantize:
_lowerCamelCase, *_lowerCamelCase : List[Any] = self.vqgan.quantize(__lowerCAmelCase )
else:
_lowerCamelCase : str = trans_latent
return self.vqgan.decode(__lowerCAmelCase )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Dict=None ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.clip_preprocessor(text=__lowerCAmelCase ,images=__lowerCAmelCase ,return_tensors="pt" ,padding=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.clip(**__lowerCAmelCase )
_lowerCamelCase : Dict = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase : Union[str, Any] = similarity_logits * weights
return similarity_logits.sum()
def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"] ,__lowerCAmelCase ,weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
_lowerCamelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"] ,__lowerCAmelCase ,weights=neg_prompts["weights"] )
else:
_lowerCamelCase : Union[str, Any] = torch.tensor([1] ,device=self.device )
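        # maximize similarity to the positive prompts while minimizing similarity to the
        # negative ones; with no neg_prompts the negative term is a constant (log 1 = 0)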
_lowerCamelCase : List[str] = -torch.log(__lowerCAmelCase ) + torch.log(__lowerCAmelCase )
return loss
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = torch.randn_like(self.latent ,requires_grad=__lowerCAmelCase ,device=self.device )
_lowerCamelCase : Union[str, Any] = torch.optim.Adam([vector] ,lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase : List[str] = self._add_vector(__lowerCAmelCase )
_lowerCamelCase : Dict = loop_post_process(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self._get_CLIP_loss(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
print("CLIP loss" ,__lowerCAmelCase )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=__lowerCAmelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
wandb.init(reinit=__lowerCAmelCase ,project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
_lowerCamelCase : Dict = Image.open(__lowerCAmelCase )
_lowerCamelCase : int = image.resize((256, 256) )
wandb.log("Original Image" ,wandb.Image(__lowerCAmelCase ) )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ):
'''simple docstring'''
if not prompts:
return []
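        # prompts may arrive as a "|"-separated string, as (prompt, weight) tuples/lists,
        # or as "prompt:weight" strings; bare prompts default to weight 1.0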
_lowerCamelCase : List[Any] = []
_lowerCamelCase : Any = []
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(__lowerCAmelCase ,(tuple, list) ):
_lowerCamelCase : Optional[int] = prompt[0]
_lowerCamelCase : Optional[int] = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = prompt.split(":" )
_lowerCamelCase : Dict = float(__lowerCAmelCase )
else:
_lowerCamelCase : Any = prompt
_lowerCamelCase : int = 1.0
processed_prompts.append(__lowerCAmelCase )
weights.append(__lowerCAmelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__lowerCAmelCase ,device=self.device ),
}
def _lowercase ( self: str ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: Tuple=False ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: str=None ,):
'''simple docstring'''
if image_path:
_lowerCamelCase : Optional[Any] = self._get_latent(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = torch.randn(self.latent_dim ,device=self.device )
if self.log:
self._init_logging(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase : Union[str, Any] = self.process_prompts(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.process_prompts(__lowerCAmelCase )
if save_final and save_path is None:
_lowerCamelCase : Union[str, Any] = os.path.join("./outputs/" ,"_".join(pos_prompts["prompts"] ) )
if not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
else:
_lowerCamelCase : List[Any] = save_path + "_" + get_timestamp()
os.makedirs(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = save_path
_lowerCamelCase : Optional[int] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(__lowerCAmelCase ) )
_lowerCamelCase : int = loop_post_process(__lowerCAmelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) ):
if show_intermediate:
show_pil(__lowerCAmelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path ,F"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"Image": wandb.Image(__lowerCAmelCase )} )
if show_final:
show_pil(__lowerCAmelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path ,F"""iter_{iter:03d}_final.png""" ) )
| 46 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
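# Hedged usage sketch: Fire turns the function signature into a CLI, so flag names follow
# the parameter names; the script filename below is assumed for illustration:
#
#   python convert_to_fp16.py --src_path pytorch_model.bin --save_path pytorch_model_fp16.bin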
| 46 | 1 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
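        # with 1_000 training steps, 5 inference steps and steps_offset=1, the stride is
        # 1_000 // 5 = 200, giving [800, 600, 400, 200, 0] + 1 = [801, 601, 401, 201, 1]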
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 46 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,"fake-roberta" )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase ,"config.json" ) ,"w" ) as f:
f.write(json.dumps({} ) )
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(type(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("custom" ,__lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("model" ,__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("bert" ,__lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
_lowerCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
class A_ ( _a ):
lowerCAmelCase__ = 'new-model'
try:
AutoConfig.register("new-model" ,__lowerCAmelCase )
# If remote code is not set, the default is to use local
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 46 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = OpenAIGPTTokenizer
lowerCAmelCase__ = OpenAIGPTTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = False
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Optional[Any] = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) )
with open(self.merges_file ,"w" ) as fp:
fp.write("\n".join(__lowerCAmelCase ) )
def _lowercase ( self: Any ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
return "lower newer", "lower newer"
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )
_lowerCamelCase : int = "lower"
_lowerCamelCase : Optional[int] = ["low", "er</w>"]
_lowerCamelCase : int = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = tokens + ["<unk>"]
_lowerCamelCase : int = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Any=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
# Simple input
_lowerCamelCase : Any = "This is a simple input"
_lowerCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : int = ("This is a simple input", "This is a pair")
_lowerCamelCase : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase ,tokenizer_r.encode ,__lowerCAmelCase ,max_length=__lowerCAmelCase ,padding="max_length" )
# Simple input
self.assertRaises(__lowerCAmelCase ,tokenizer_r.encode_plus ,__lowerCAmelCase ,max_length=__lowerCAmelCase ,padding="max_length" )
# Simple input
self.assertRaises(
__lowerCAmelCase ,tokenizer_r.batch_encode_plus ,__lowerCAmelCase ,max_length=__lowerCAmelCase ,padding="max_length" ,)
# Pair input
self.assertRaises(__lowerCAmelCase ,tokenizer_r.encode ,__lowerCAmelCase ,max_length=__lowerCAmelCase ,padding="max_length" )
# Pair input
self.assertRaises(__lowerCAmelCase ,tokenizer_r.encode_plus ,__lowerCAmelCase ,max_length=__lowerCAmelCase ,padding="max_length" )
# Pair input
self.assertRaises(
__lowerCAmelCase ,tokenizer_r.batch_encode_plus ,__lowerCAmelCase ,max_length=__lowerCAmelCase ,padding="max_length" ,)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A_ ( _a ):
pass
| 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
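    # the _LazyModule defers the real submodule import until an attribute (here the
    # tokenizer class) is first accessed, keeping `import transformers` cheap even when
    # sentencepiece is not installed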
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCAmelCase : Optional[int] = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_lowerCamelCase : Tuple = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : List[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :]
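def _qkv_split_demo() -> None:
    # Illustrative sanity check (not part of the original script): the fused qkv
    # matrix of shape (3 * hidden, hidden) splits into three equal blocks, which
    # is exactly what read_in_q_k_v does above with config.hidden_size.
    hidden = 4
    qkv_weight = torch.randn(3 * hidden , hidden )
    query = qkv_weight[:hidden, :]
    key = qkv_weight[hidden : hidden * 2, :]
    value = qkv_weight[-hidden:, :]
    assert query.shape == key.shape == value.shape == (hidden, hidden)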
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : List[Any] = dct.pop(_lowerCamelCase )
_lowerCamelCase : Optional[int] = val
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : int = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=_lowerCamelCase )
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : str = False
if "vqa" in checkpoint_url:
_lowerCamelCase : str = True
_lowerCamelCase : Union[str, Any] = 3129
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Optional[Any] = "vqa2-id2label.json"
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
        _lowerCamelCase : Any = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[int] = idalabel
_lowerCamelCase : int = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Any = ViltForQuestionAnswering(_lowerCamelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[Any] = {0: "False", 1: "True"}
_lowerCamelCase : int = {v: k for k, v in config.idalabel.items()}
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Optional[Any] = ViltForImagesAndTextClassification(_lowerCamelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : Union[str, Any] = ViltForImageAndTextRetrieval(_lowerCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[int] = ViltForMaskedLM(_lowerCamelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["state_dict"]
_lowerCamelCase : str = create_rename_keys(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
if mlm_model or irtr_model:
_lowerCamelCase : Dict = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCamelCase, _lowerCamelCase : List[str] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_lowerCamelCase )
# Define processor
_lowerCamelCase : int = ViltImageProcessor(size=384 )
_lowerCamelCase : Union[str, Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
_lowerCamelCase : Optional[int] = ViltProcessor(_lowerCamelCase , _lowerCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCamelCase : int = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : Union[str, Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : str = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_lowerCamelCase : List[str] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Optional[int] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : int = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCamelCase : str = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=_lowerCamelCase ).raw )
if mlm_model:
_lowerCamelCase : Any = "a bunch of [MASK] laying on a [MASK]."
else:
_lowerCamelCase : List[str] = "How many cats are there?"
_lowerCamelCase : Union[str, Any] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
# Verify outputs
if mlm_model:
_lowerCamelCase : List[str] = torch.Size([1, 11, 30522] )
_lowerCamelCase : Dict = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase : List[str] = torch.Size([1, 3129] )
_lowerCamelCase : List[str] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_lowerCamelCase : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase : List[str] = torch.Size([1, 2] )
_lowerCamelCase : Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
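    # Example invocation (the URL is the script default; the output path is illustrative):
    #   python convert_vilt_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
    #       --pytorch_dump_folder_path ./vilt-b32-mlm-itm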
| 46 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class A_ ( unittest.TestCase ):
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = FlaxAutoModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
@slow
def _lowercase ( self: int ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__lowerCAmelCase ):
_lowerCamelCase : int = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = FlaxAutoModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
@slow
def _lowercase ( self: int ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tokenizer("Do you support jax jitted function?" ,return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowerCAmelCase: Union[str, Any] ):
return model(**__lowerCAmelCase )
eval(**__lowerCAmelCase ).block_until_ready()
@slow
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[Any] = FlaxRobertaModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Any = tokenizer("Do you support jax jitted function?" ,return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowerCAmelCase: int ):
return model(**__lowerCAmelCase )
eval(**__lowerCAmelCase ).block_until_ready()
def _lowercase ( self: Any ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = FlaxAutoModel.from_pretrained("bert-base" )
def _lowercase ( self: int ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowerCamelCase : Union[str, Any] = FlaxAutoModel.from_pretrained(__lowerCAmelCase ,revision="aaaaaa" )
def _lowercase ( self: int ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" ,):
_lowerCamelCase : Union[str, Any] = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(__lowerCAmelCase ,"Use `from_pt=True` to load this model" ):
_lowerCamelCase : Any = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 46 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( string_a , string_b ) -> str | Literal[False]:
    '''simple docstring'''
    list_a = list(string_a )
    list_b = list(string_b )
    count = 0
    for i in range(len(list_a ) ):
        # mark positions where the two binary strings differ
        if list_a[i] != list_b[i]:
            count += 1
            list_a[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list_a )
def lowerCamelCase_( _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = []
while True:
_lowerCamelCase : Tuple = ["$"] * len(_lowerCamelCase )
_lowerCamelCase : str = []
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1 , len(_lowerCamelCase ) ):
_lowerCamelCase : Dict = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCamelCase : Any = "*"
_lowerCamelCase : Optional[int] = "*"
temp.append("X" )
for i in range(len(_lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCamelCase ) == 0:
return pi
_lowerCamelCase : List[Any] = list(set(_lowerCamelCase ) )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = []
for minterm in minterms:
_lowerCamelCase : List[Any] = ""
for _ in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCamelCase )
return temp
def is_for_table( string_a , string_b , count ) -> bool:
    '''simple docstring'''
    list_a = list(string_a )
    list_b = list(string_b )
    count_n = 0
    for i in range(len(list_a ) ):
        # count positions where the implicant and the minterm pattern differ
        if list_a[i] != list_b[i]:
            count_n += 1
    return count_n == count
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = [0] * len(_lowerCamelCase )
for i in range(len(chart[0] ) ):
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
_lowerCamelCase : Any = j
if count == 1:
_lowerCamelCase : Union[str, Any] = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
_lowerCamelCase : str = 0
_lowerCamelCase : int = -1
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = chart[i].count(1 )
if count_n > max_n:
_lowerCamelCase : Any = count_n
_lowerCamelCase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Any = 0
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[list[int]]:
'''simple docstring'''
_lowerCamelCase : str = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : List[Any] = prime_implicants[i].count("_" )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = 1
return chart
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
_lowerCamelCase : str = [
float(_lowerCamelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_lowerCamelCase : Tuple = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = check(_lowerCamelCase )
print("Prime Implicants are:" )
print(_lowerCamelCase )
_lowerCamelCase : Any = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = selection(_lowerCamelCase , _lowerCamelCase )
print("Essential Prime Implicants are:" )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowerCamelCase_( ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=_lowerCamelCase , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=_lowerCamelCase , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=_lowerCamelCase , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=_lowerCamelCase , default=0 , help="cuda_id." , )
_lowerCamelCase : Optional[int] = parser.parse_args()
return args
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if not len(_lowerCamelCase ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
_lowerCamelCase, _lowerCamelCase : str = imgs[0].size
_lowerCamelCase : List[Any] = Image.new("RGB" , size=(cols * w, rows * h) )
_lowerCamelCase, _lowerCamelCase : List[Any] = grid.size
for i, img in enumerate(_lowerCamelCase ):
grid.paste(_lowerCamelCase , box=(i % cols * w, i // cols * h) )
return grid
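def _image_grid_demo() -> None:
    # Illustrative check (not part of the original script): four 64x64 tiles
    # arranged 2x2 yield one 128x128 image.
    tiles = [Image.new("RGB" , (64, 64) , color ) for color in ("red", "green", "blue", "white")]
    assert image_grid(tiles , rows=2 , cols=2 ).size == (128, 128)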
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase="robotic cat with wings" , _lowerCamelCase=7.5 , _lowerCamelCase=50 , _lowerCamelCase=1 , _lowerCamelCase=42 , ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Any = torch.Generator(pipeline.device ).manual_seed(_lowerCamelCase )
_lowerCamelCase : str = pipeline(
_lowerCamelCase , guidance_scale=_lowerCamelCase , num_inference_steps=_lowerCamelCase , generator=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , ).images
_lowerCamelCase : int = int(math.sqrt(_lowerCamelCase ) )
_lowerCamelCase : str = image_grid(_lowerCamelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
_lowerCAmelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
_lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
_lowerCAmelCase : List[str] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
_lowerCAmelCase : Dict = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
_lowerCAmelCase : Any = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
_lowerCAmelCase : int = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)  # bypass the safety checker: pass images through, flag nothing
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
_lowerCAmelCase : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
_lowerCAmelCase : int = unet.to(torch.device('''cuda''', args.cuda_id))
_lowerCAmelCase : List[Any] = pipeline.to(unet.device)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
_lowerCAmelCase : Dict = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 46 |
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: int | None = None ):
'''simple docstring'''
_lowerCamelCase : Any = value
_lowerCamelCase : Optional[int] = random()
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> tuple[Node | None, Node | None]:
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCamelCase, _lowerCamelCase : int = split(root.left , _lowerCamelCase )
return left, root
else:
_lowerCamelCase, _lowerCamelCase : Optional[int] = split(root.right , _lowerCamelCase )
return root, right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCamelCase : Any = merge(left.right , _lowerCamelCase )
return left
else:
_lowerCamelCase : Optional[Any] = merge(_lowerCamelCase , right.left )
return right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase : int = Node(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Tuple = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , value - 1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
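def demo_treap() -> None:
    # Non-interactive sketch of the API above (values are illustrative):
    root: Node | None = None
    for value in (5, 3, 8):
        root = insert(root , value )
    inorder(root )  # expected output: 3,5,8, (inorder of a BST is sorted)
    root = erase(root , 5 )
    inorder(root )  # expected output: 3,8,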
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCamelCase : Optional[Any] = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCamelCase : Optional[Any] = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : List[Any] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
_lowerCamelCase : int = input()
while args != "q":
_lowerCamelCase : List[str] = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
_lowerCamelCase : Tuple = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
return abs(_lowerCamelCase ) if a == 0 else greatest_common_divisor(b % a , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
while y: # --> when y=0 then loop will terminate and return x as final GCD.
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = y, x % y
return abs(_lowerCamelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
try:
_lowerCamelCase : Any = input("Enter two integers separated by comma (,): " ).split("," )
_lowerCamelCase : Dict = int(nums[0] )
_lowerCamelCase : Tuple = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(_lowerCamelCase , _lowerCamelCase )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(_lowerCamelCase , _lowerCamelCase )}""" )
except (IndexError, UnboundLocalError, ValueError):
print("Wrong input" )
if __name__ == "__main__":
main()
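    # Worked example (illustrative): both variants agree, and abs() keeps the
    # result non-negative, so
    #   greatest_common_divisor(48, 18) -> 6
    #   gcd_by_iterative(-12, 18)       -> 6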
| 46 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = SpeechTaTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = AddedToken("<mask>" ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: str=20 ,__lowerCAmelCase: List[Any]=5 ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.decode(__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCAmelCase__ = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCAmelCase__ = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCAmelCase__ = field(
default=1_0_0_0_0 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCAmelCase__ = field(default=2E-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCAmelCase__ = field(default='cosine' , metadata={'help': 'Learning rate schedule type.'} )
lowerCAmelCase__ = field(
default=7_5_0 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCAmelCase__ = field(
default=1_6 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCAmelCase__ = field(default=5_0_0_0_0 , metadata={'help': 'Maximum number of training steps.'} )
lowerCAmelCase__ = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCAmelCase__ = field(default=1_0_2_4 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCAmelCase__ = field(
default=1_0_2_4 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCAmelCase__ = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCAmelCase__ = field(default=1_0_2_4 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCAmelCase__ = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCAmelCase__ = field(default=2_5_6 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCAmelCase__ = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCAmelCase__ = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCAmelCase__ = field(default=1_0 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCAmelCase__ = field(
default=2_0_0 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCAmelCase__ = field(
        default='eval_results.json' , metadata={'help': 'File in which to save the evaluation results.'} )
lowerCAmelCase__ = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCAmelCase__ = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default=_a , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCAmelCase__ = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCAmelCase__ = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCAmelCase__ = field(
default=1_0_0_0_0_0 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCAmelCase__ = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCAmelCase__ = field(
default=1_0_0_0 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=1_0_0 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCAmelCase__ = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCAmelCase__ = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCAmelCase__ = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCAmelCase__ = field(default=2_0_0_0_0_0 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCAmelCase__ = field(
        default=3_2_7_6_8 , metadata={'help': 'Vocab size of the new tokenizer.'} )
lowerCAmelCase__ = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCAmelCase__ = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCAmelCase__ = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Push saved tokenizer to the hub.'} )
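# Typical consumption of these argument groups (sketch; `TrainingArguments` and
# the field names are assumed from the defaults above, not shown verbatim here):
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args_into_dataclasses()[0]
#   print(args.model_ckpt, args.learning_rate)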
| 46 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
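# A common use of the re-exports above is swapping a pipeline's scheduler
# (sketch; the checkpoint id is illustrative):
#   from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)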
| 46 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( _a ):
lowerCAmelCase__ = ['image_processor', 'tokenizer']
lowerCAmelCase__ = 'ViltImageProcessor'
lowerCAmelCase__ = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,__lowerCAmelCase ,)
_lowerCamelCase : Optional[Any] = kwargs.pop("feature_extractor" )
_lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.image_processor
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the wrapped tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        # Forwarded to the wrapped tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
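# --- Illustrative usage (a sketch, not part of the class above); the checkpoint name is
# an assumption for illustration:
# from PIL import Image
# from transformers import ViltProcessor
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# inputs = processor(images=Image.open("cats.jpg"), text="How many cats are there?", return_tensors="pt")
# `inputs` combines the tokenizer outputs (input_ids, ...) with pixel_values / pixel_mask.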
| 46 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 5_0))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        # With num_train_timesteps=1_000, 5 steps and "leading" spacing, the raw grid is
        # [800, 600, 400, 200, 0]; steps_offset=1 shifts every entry by one, hence the
        # expected [801, 601, 401, 201, 1] below.
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1], [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )
    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
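        # The (t, prev_t) pairs below exercise DDIM's posterior variance,
        # sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev);
        # the numeric literals act as regression references rather than hand-derived values.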
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self, x: T) -> None:
        # Move x to the front of the cache, evicting the least recently used key if full.
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)
    def display(self) -> None:
        for k in self.dq_store:
            print(k)
    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
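    # Walkthrough of the run above: with capacity 4, the deque after the six refer() calls
    # evolves as ['A'] -> [2, 'A'] -> [3, 2, 'A'] -> ['A', 3, 2] -> [4, 'A', 3, 2]; referring 5
    # evicts the least recently used key 2 from the back, giving [5, 4, 'A', 3] as asserted.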
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1_024, 2_048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
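# --- Illustrative usage (a sketch, not part of the config above):
# config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage1", "stage4"])
# config.stage_names -> ["stem", "stage1", "stage2", "stage3", "stage4"]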
| 46 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()
    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85]),
            )
        )
    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85]),
            )
        )
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"
    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
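# Worked example (illustrative): with the defaults above, a 32-frame 224x224 clip cut into
# [2, 16, 16] tubelets yields (32 / 2) * (224 / 16) * (224 / 16) = 16 * 14 * 14 = 3_136 tokens.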
| 46 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
'''simple docstring'''
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_lowerCamelCase : Dict = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
_lowerCamelCase : Union[str, Any] = os.path.join(get_home_dir() , "models" )
_lowerCamelCase : List[str] = _load_vocab(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , cls=_lowerCamelCase )
_lowerCamelCase : int = nlp.model.BERTModel(
_lowerCamelCase , len(_lowerCamelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=_lowerCamelCase , use_token_type_embed=_lowerCamelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=_lowerCamelCase , use_decoder=_lowerCamelCase , )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.0_2,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(_lowerCamelCase ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
_lowerCamelCase : List[str] = RobertaTokenizer.from_pretrained("roberta-base" )
_lowerCamelCase : Union[str, Any] = tokenizer.encode_plus(_lowerCamelCase )["input_ids"]
# Get gluon output
_lowerCamelCase : Optional[Any] = mx.nd.array([input_ids] )
_lowerCamelCase : Any = original_bort(inputs=_lowerCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_lowerCamelCase )
_lowerCamelCase : Any = BertModel.from_pretrained(_lowerCamelCase )
hf_bort_model.eval()
_lowerCamelCase : Any = tokenizer.encode_plus(_lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = hf_bort_model(**_lowerCamelCase )[0]
_lowerCamelCase : Union[str, Any] = output_gluon[0].asnumpy()
_lowerCamelCase : Optional[Any] = output_hf[0].detach().numpy()
_lowerCamelCase : Optional[int] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
_lowerCamelCase : Union[str, Any] = np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , _lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
    def test_added_tokens_do_lower_case(self):
'''simple docstring'''
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
    def test_maximum_encoding_length_pair_input(self):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
    def test_pretokenized_inputs(self):
'''simple docstring'''
pass
| 46 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = 1
_lowerCamelCase : int = 3
_lowerCamelCase : Tuple = (32, 32)
_lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
    def dummy_cond_unet(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
return model
@property
    def dummy_vae(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
return model
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Any = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5_006 ,)
return RobertaSeriesModelWithTransformation(__lowerCAmelCase )
@property
    def dummy_extractor(self):
'''simple docstring'''
def extract(*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Tuple ):
class A_ :
def __init__( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Any = torch.ones([0] )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
    def test_stable_diffusion_img2img_default_case(self):
'''simple docstring'''
_lowerCamelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Any = self.dummy_cond_unet
_lowerCamelCase : int = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_vae
_lowerCamelCase : List[str] = self.dummy_text_encoder
_lowerCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_lowerCamelCase : Union[str, Any] = 77
_lowerCamelCase : List[Any] = self.dummy_image.to(__lowerCAmelCase )
_lowerCamelCase : List[str] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowerCamelCase : int = AltDiffusionImgaImgPipeline(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,safety_checker=__lowerCAmelCase ,feature_extractor=self.dummy_extractor ,)
_lowerCamelCase : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = alt_pipe.to(__lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = "A painting of a squirrel eating a burger"
_lowerCamelCase : List[str] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : int = alt_pipe(
[prompt] ,generator=__lowerCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,image=__lowerCAmelCase ,)
_lowerCamelCase : Dict = output.images
_lowerCamelCase : Any = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : Union[str, Any] = alt_pipe(
[prompt] ,generator=__lowerCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,image=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
_lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCamelCase : List[str] = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" )
    def test_stable_diffusion_img2img_fp16(self):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.dummy_cond_unet
_lowerCamelCase : str = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
_lowerCamelCase : int = self.dummy_vae
_lowerCamelCase : int = self.dummy_text_encoder
_lowerCamelCase : List[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_lowerCamelCase : Tuple = 77
_lowerCamelCase : Any = self.dummy_image.to(__lowerCAmelCase )
# put models in fp16
_lowerCamelCase : List[str] = unet.half()
_lowerCamelCase : Union[str, Any] = vae.half()
_lowerCamelCase : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
_lowerCamelCase : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,safety_checker=__lowerCAmelCase ,feature_extractor=self.dummy_extractor ,)
_lowerCamelCase : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = alt_pipe.to(__lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = "A painting of a squirrel eating a burger"
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = alt_pipe(
[prompt] ,generator=__lowerCAmelCase ,num_inference_steps=2 ,output_type="np" ,image=__lowerCAmelCase ,).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_lowerCamelCase : List[str] = init_image.resize((760, 504) )
_lowerCamelCase : Any = "BAAI/AltDiffusion"
_lowerCamelCase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCAmelCase ,safety_checker=__lowerCAmelCase ,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[Any] = "A fantasy landscape, trending on artstation"
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,strength=0.75 ,guidance_scale=7.5 ,generator=__lowerCAmelCase ,output_type="np" ,)
_lowerCamelCase : Any = output.images[0]
_lowerCamelCase : Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_lowerCamelCase : List[Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
'''simple docstring'''
_lowerCamelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
_lowerCamelCase : Union[str, Any] = init_image.resize((768, 512) )
_lowerCamelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
_lowerCamelCase : Dict = "BAAI/AltDiffusion"
_lowerCamelCase : str = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCAmelCase ,safety_checker=__lowerCAmelCase ,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[int] = "A fantasy landscape, trending on artstation"
_lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCamelCase : List[str] = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,strength=0.75 ,guidance_scale=7.5 ,generator=__lowerCAmelCase ,output_type="np" ,)
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
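# Worked example (illustrative): height = width = 768 with scale_factor=8 gives
# 768 // 8**2 = 12 per side, so the function returns (96, 96) - the latent resolution
# that the MoVQ decoder later upsamples back to 768x768.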
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
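    # Worked example (illustrative): num_inference_steps=100 with strength=0.3 gives
    # init_timestep=30 and t_start=70, so only the last 30 scheduler timesteps are run -
    # the usual img2img trick of denoising from a partially noised version of the input.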
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, timestep, noise)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 46 | 1 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images) -> None:
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
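# --- Illustrative invocation (assuming this script is saved as retrieve.py):
# python retrieve.py --class_prompt "photo of a dog" --class_data_dir class_images/dog --num_class_images 200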
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        # e must be coprime with phi(n) = (p - 1) * (q - 1) for the inverse d to exist
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break
    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program.")
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
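# --- How the generated pair would be used (a sketch, not part of the script above): with
# (n, e) from rsa_pubkey.txt and (n, d) from rsa_privkey.txt, textbook RSA on an integer
# message m < n is:
#   ciphertext = pow(m, e, n)   # encrypt with the public exponent
#   m = pow(ciphertext, d, n)   # decrypt with the private exponent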
| 46 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
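

# Example invocation (illustrative; the script name and output path are hypothetical):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16
# Note: `parser.set_defaults(base_model=True)` above makes the backbone-only export the default.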
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, inputs_dict):
        # make the random masking reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
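

# To run just this module (the path is assumed from the usual transformers test layout):
#   python -m pytest tests/models/vit_mae/test_modeling_vit_mae.py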
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train and validation `DataLoader`s for GLUE MRPC, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
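

# Example launches (illustrative; the file name is assumed):
#   python multi_process_metrics.py              # single CPU or GPU
#   accelerate launch multi_process_metrics.py   # distributed, after running `accelerate config`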
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in list. Returns -1 if element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
else:
print('''Not found''')
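
# Worked example (added): with the default precision of 10, a short list is handed
# straight to lin_search, e.g. ite_ternary_search([1, 3, 5, 7, 9], 7) returns 3.
# On longer sorted lists the probes at (left+right)//3 + 1 and 2*(left+right)//3 + 1
# discard two thirds of the interval per step, giving O(log3 n) comparisons.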
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters but are
    arranged differently (ignoring the case).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding character
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
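
# Worked example (added): check_anagrams("Silent", "listen") returns True, since the
# lower-cased letter counts cancel; check_anagrams("There", "Their") returns False
# because 'e' and 'i' are left unbalanced.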
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Returns the number of distinct terms in the sequence a**b
    for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
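
# Worked example (added): for n = 5 there are 16 products a**b with 2 <= a, b <= 5,
# with a single collision (2**4 == 4**2 == 16), so solution(5) == 15.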
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the source data: collect the i-th field of every record into its own list."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise every column; weight 0 inverts the score (smaller raw value is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for every record."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
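

# Illustrative usage (added; weight 0 means smaller is better, 1 means bigger is better):
#   procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
#   -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]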
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
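

# Illustrative usage (added): a folding-model config builds its nested defaults on demand.
#   config = EsmConfig(vocab_size=33, is_folding_model=True)
#   config.esmfold_config.trunk.structure_module.sequence_dim  # -> 384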
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
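
# Worked example (added): a root holding 3 coins over two empty leaves needs one move
# per child, so distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2.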
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """Returns the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
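
# Worked example (added): each base maps to its Watson-Crick complement, so
# dna("GCTA") returns "CGAT", while a strand containing anything outside ATCG
# raises ValueError("Invalid Strand").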
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: sieve up to sqrt(n), then reuse those
    primes to mark composites in sqrt(n)-sized segments."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
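
# Worked example (added): sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]. The first
# pass sieves up to sqrt(30) ~= 5; the rest of the range is processed in segments of
# that size, reusing the small primes, which keeps memory at O(sqrt(n)).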
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : Tuple = ""
else:
_lowerCamelCase : str = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Tuple = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : Any = dct.pop(_lowerCamelCase )
_lowerCamelCase : Dict = val
def lowerCamelCase_( ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase : str = 8
# set labels if required
if not base_model:
_lowerCamelCase : str = 1000
_lowerCamelCase : Any = "huggingface/label-files"
_lowerCamelCase : Union[str, Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase : int = 384
_lowerCamelCase : str = 1536
_lowerCamelCase : List[str] = 12
_lowerCamelCase : Optional[int] = 6
# load original model from torch hub
_lowerCamelCase : Union[str, Any] = torch.hub.load("facebookresearch/dino:main" , _lowerCamelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : List[str] = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCamelCase : Tuple = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase : Optional[Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase ).eval()
else:
_lowerCamelCase : Union[str, Any] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase : Tuple = ViTImageProcessor()
_lowerCamelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Dict = encoding["pixel_values"]
_lowerCamelCase : int = model(_lowerCamelCase )
if base_model:
_lowerCamelCase : List[str] = original_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
_lowerCamelCase : Tuple = original_model(_lowerCamelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 46 | 1 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the alphanumeric character fraction of the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting the occurrences of the words 'config' and 'test' relative to the number of lines.
    """

    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and keyword-free files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
_lowerCAmelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCAmelCase : List[str] = parser.parse_args()
if args.num_workers is None:
_lowerCAmelCase : Optional[Any] = multiprocessing.cpu_count()
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCAmelCase : Union[str, Any] = time.time()
_lowerCAmelCase : str = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
_lowerCAmelCase : List[Any] = time.time()
_lowerCAmelCase : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
_lowerCAmelCase : Dict = set(ds.unique('''hash'''))
_lowerCAmelCase : Union[str, Any] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
_lowerCAmelCase : Tuple = time.time()
_lowerCAmelCase : List[Any] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCAmelCase : Union[str, Any] = time.time()
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
_lowerCAmelCase : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCAmelCase : Optional[int] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCAmelCase : Optional[int] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCAmelCase : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
_lowerCAmelCase : Optional[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase_( _outputs ) -> Union[str, Any]:
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def lowerCamelCase_( _outputs ) -> Optional[int]:
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
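# --- Added sketch (not in the original): why the softmax helper above subtracts the row
# max first. `stable_softmax` is a hypothetical, readable stand-in for the helper,
# assuming plain NumPy semantics.
def stable_softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)  # the largest entry becomes exp(0) = 1, so no overflow
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
# stable_softmax(np.array([[1000.0, 1000.0]])) -> [[0.5, 0.5]], whereas np.exp(1000.0) alone overflows to inf.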
class A_ ( _a ):
lowerCAmelCase__ = 'sigmoid'
lowerCAmelCase__ = 'softmax'
lowerCAmelCase__ = 'none'
@add_end_docstrings(
_a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class A_ ( _a ):
lowerCAmelCase__ = False
lowerCAmelCase__ = ClassificationFunction.NONE
def __init__( self: str ,**__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: List[Any]="" ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = tokenizer_kwargs
_lowerCamelCase : Optional[int] = {}
if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
_lowerCamelCase : Tuple = self.model.config.return_all_scores
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or top_k is None:
_lowerCamelCase : List[str] = top_k
_lowerCamelCase : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,__lowerCAmelCase ,)
if return_all_scores:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : Union[str, Any] = 1
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowerCamelCase : Dict = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self: int ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = super().__call__(*__lowerCAmelCase ,**__lowerCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowerCamelCase : Optional[Any] = "top_k" not in kwargs
if isinstance(args[0] ,__lowerCAmelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = self.framework
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.tokenizer(**__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] ,__lowerCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: int ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return self.model(**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: Dict=True ):
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowerCamelCase : Dict = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowerCamelCase : List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None:
_lowerCamelCase : Optional[int] = self.model.config.function_to_apply
else:
_lowerCamelCase : str = ClassificationFunction.NONE
_lowerCamelCase : List[Any] = model_outputs["logits"][0]
_lowerCamelCase : Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowerCamelCase : str = sigmoid(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowerCamelCase : Optional[int] = softmax(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.NONE:
_lowerCamelCase : str = outputs
else:
raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowerCamelCase : Optional[int] = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__lowerCAmelCase )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] ,reverse=True )
if top_k is not None:
_lowerCamelCase : Any = dict_scores[:top_k]
return dict_scores
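# --- Added usage sketch (assumption: the class above is a transformers-style
# text-classification pipeline; the checkpoint name is illustrative):
#
#   from transformers import pipeline
#   clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#   clf("I love this!")              # [{'label': 'POSITIVE', 'score': ...}]
#   clf("I love this!", top_k=None)  # scores for every label; replaces return_all_scores=True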
| 46 | 1 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase_( _outputs ) -> Union[str, Any]:
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def lowerCamelCase_( _outputs ) -> Optional[int]:
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class A_ ( _a ):
lowerCAmelCase__ = 'sigmoid'
lowerCAmelCase__ = 'softmax'
lowerCAmelCase__ = 'none'
@add_end_docstrings(
_a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class A_ ( _a ):
lowerCAmelCase__ = False
lowerCAmelCase__ = ClassificationFunction.NONE
def __init__( self: str ,**__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: List[Any]="" ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = tokenizer_kwargs
_lowerCamelCase : Optional[int] = {}
if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
_lowerCamelCase : Tuple = self.model.config.return_all_scores
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or top_k is None:
_lowerCamelCase : List[str] = top_k
_lowerCamelCase : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,__lowerCAmelCase ,)
if return_all_scores:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : Union[str, Any] = 1
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowerCamelCase : Dict = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self: int ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = super().__call__(*__lowerCAmelCase ,**__lowerCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowerCamelCase : Optional[Any] = "top_k" not in kwargs
if isinstance(args[0] ,__lowerCAmelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = self.framework
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.tokenizer(**__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] ,__lowerCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: int ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return self.model(**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: Dict=True ):
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowerCamelCase : Dict = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowerCamelCase : List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None:
_lowerCamelCase : Optional[int] = self.model.config.function_to_apply
else:
_lowerCamelCase : str = ClassificationFunction.NONE
_lowerCamelCase : List[Any] = model_outputs["logits"][0]
_lowerCamelCase : Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowerCamelCase : str = sigmoid(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowerCamelCase : Optional[int] = softmax(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.NONE:
_lowerCamelCase : str = outputs
else:
raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowerCamelCase : Optional[int] = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__lowerCAmelCase )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] ,reverse=True )
if top_k is not None:
_lowerCamelCase : Any = dict_scores[:top_k]
return dict_scores
| 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCAmelCase : Tuple = '''\
Text data.
Second line of data.'''
_lowerCAmelCase : str = '''file'''
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCamelCase : List[str] = bytes(_lowerCamelCase , "utf-8" )
with zstd.open(_lowerCamelCase , "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowerCamelCase ) , "w" ) as f:
f.write(_lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
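# --- Added sketch: the extract-on-download pattern the test above exercises, outside
# pytest (assumption: `datasets` is installed; the archive path is illustrative):
#
#   from datasets.download.download_config import DownloadConfig
#   from datasets.utils.file_utils import cached_path
#   cfg = DownloadConfig(cache_dir="/tmp/hf-cache", extract_compressed_file=True)
#   extracted_path = cached_path("data.txt.gz", download_config=cfg)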
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Tuple = str(Path(_lowerCamelCase ).resolve() )
assert cached_path(_lowerCamelCase ) == text_file
# relative path
_lowerCamelCase : Optional[int] = str(Path(_lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowerCamelCase ) == text_file
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : str = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
# relative path
_lowerCamelCase : List[Any] = "./__missing_file__.txt"
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46 | 1 |
"""simple docstring"""
def lowerCamelCase_( price , tax_rate ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
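# Worked example (added): price_plus_tax(100, 0.25) = 100 * (1 + 0.25) = 125.0.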
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 46 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( src_path , map_location = "cpu" , save_path = None ) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
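# --- Added usage note (assumption: Fire maps the function's parameters onto CLI
# arguments; the script name is illustrative):
#
#   python fp16_convert.py pytorch_model.bin                      # overwrite in place
#   python fp16_convert.py pytorch_model.bin --save_path out.bin  # write halved tensors elsewhere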
if __name__ == "__main__":
fire.Fire(convert)
| 46 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self: int ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict=7 ,__lowerCAmelCase: List[str]=3 ,__lowerCAmelCase: Any=18 ,__lowerCAmelCase: Any=30 ,__lowerCAmelCase: Tuple=400 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: List[Any]=[0.5, 0.5, 0.5] ,__lowerCAmelCase: Tuple=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 18}
_lowerCamelCase : List[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : List[Any] = min_resolution
_lowerCamelCase : Tuple = max_resolution
_lowerCamelCase : Optional[Any] = do_resize
_lowerCamelCase : Optional[Any] = size
_lowerCamelCase : int = do_center_crop
_lowerCamelCase : Union[str, Any] = crop_size
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Any = image_mean
_lowerCamelCase : Union[str, Any] = image_std
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
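# --- Added note: the dict above is splatted into the image processor by the tests below.
# A minimal hedged construction outside the harness:
#
#   from transformers import LevitImageProcessor
#   ip = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})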
@require_torch
@require_vision
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = LevitImageProcessor if is_vision_available() else None
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = LevitImageProcessingTester(self )
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_mean" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_std" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_normalize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_resize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_center_crop" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"size" ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
_lowerCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,Image.Image )
# Test not batched input
_lowerCamelCase : List[str] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : List[Any] = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase ,numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,np.ndarray )
# Test not batched input
_lowerCamelCase : str = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : Union[str, Any] = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase ,torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,torch.Tensor )
# Test not batched input
_lowerCamelCase : str = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : List[str] = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
| 46 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,"fake-roberta" )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase ,"config.json" ) ,"w" ) as f:
f.write(json.dumps({} ) )
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(type(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("custom" ,__lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("model" ,__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("bert" ,__lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
_lowerCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
class A_ ( _a ):
lowerCAmelCase__ = 'new-model'
try:
AutoConfig.register("new-model" ,__lowerCAmelCase )
# If remote code is not set, the default is to use local
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
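# --- Added sketch of the registration API exercised above (`MyConfig` is illustrative;
# the AutoConfig calls are the standard transformers ones):
#
#   from transformers import AutoConfig, PretrainedConfig
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#   AutoConfig.register("my-model", MyConfig)
#   config = AutoConfig.for_model("my-model")  # -> MyConfig instance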
| 46 | 1 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_lowerCAmelCase : int = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Tuple = ["layers", "blocks"]
for k in ignore_keys:
        state_dict.pop(k , None )
_lowerCAmelCase : int = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = list(s_dict.keys() )
for key in keys:
_lowerCamelCase : int = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_lowerCamelCase : List[Any] = new_key.replace(_lowerCamelCase , _lowerCamelCase )
print(F"""{key} -> {new_key}""" )
_lowerCamelCase : Union[str, Any] = s_dict.pop(_lowerCamelCase )
return s_dict
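# --- Added toy demo of the renaming above (substring replacement, applied in
# WHISPER_MAPPING's iteration order):
#
#   {"blocks.0.mlp.0.weight": w}  ->  {"layers.0.fc1.weight": w}
#   {"decoder.ln.weight": w}      ->  {"decoder.layer_norm.weight": w}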
def lowerCamelCase_( emb ) -> Any:
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
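# --- Added note: the helper above re-uses the decoder token-embedding matrix as the LM
# head (weight tying). Replacing `.data` makes the Linear's weight the (vocab_size,
# emb_size) embedding matrix, so logits = hidden_states @ emb.weight.T.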
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> bytes:
'''simple docstring'''
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = os.path.basename(_lowerCamelCase )
    expected_sha256 = url.split("/" )[-2]
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ) and not os.path.isfile(_lowerCamelCase ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(_lowerCamelCase ):
_lowerCamelCase : List[Any] = open(_lowerCamelCase , "rb" ).read()
        if hashlib.sha256(_lowerCamelCase ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(_lowerCamelCase ) as source, open(_lowerCamelCase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=_lowerCamelCase , unit_divisor=1024 ) as loop:
while True:
_lowerCamelCase : str = source.read(8192 )
if not buffer:
break
output.write(_lowerCamelCase )
loop.update(len(_lowerCamelCase ) )
_lowerCamelCase : Dict = open(_lowerCamelCase , "rb" ).read()
    if hashlib.sha256(_lowerCamelCase ).hexdigest() != expected_sha256:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
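# --- Added note: the integrity check above compares the SHA-256 of the downloaded bytes
# against the hash embedded in the URL path (the second-to-last path segment of each
# _MODELS entry).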
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
if ".pt" not in checkpoint_path:
_lowerCamelCase : Tuple = _download(_MODELS[checkpoint_path] )
else:
_lowerCamelCase : List[Any] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCamelCase : Dict = original_checkpoint["dims"]
_lowerCamelCase : Union[str, Any] = original_checkpoint["model_state_dict"]
_lowerCamelCase : int = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(_lowerCamelCase )
rename_keys(_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : List[str] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
_lowerCamelCase : Dict = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=_lowerCamelCase , decoder_ffn_dim=_lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
_lowerCamelCase : List[str] = WhisperForConditionalGeneration(_lowerCamelCase )
    missing , unexpected = model.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
    if len(missing ) > 0 and not set(missing ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
_lowerCamelCase : Optional[int] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_lowerCamelCase : Tuple = proj_out_weights
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_lowerCAmelCase : int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 | 1 |
"""simple docstring"""
def lowerCamelCase_( limit=28123 ) -> int:
    '''simple docstring'''
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
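# --- Added note (the classic Project Euler 23 shape): sum_divs is a proper-divisor-sum
# sieve, so sum_divs[n] > n marks n as abundant, and n is counted only when no a in
# abundants has n - a abundant too, i.e. n is not a sum of two abundant numbers.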
if __name__ == "__main__":
print(solution())
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_lowerCamelCase : Tuple = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : List[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ["head.weight", "head.bias"]
for k in ignore_keys:
        state_dict.pop(k , None )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : List[Any] = dct.pop(_lowerCamelCase )
_lowerCamelCase : Optional[int] = val
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : int = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=_lowerCamelCase )
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : str = False
if "vqa" in checkpoint_url:
_lowerCamelCase : str = True
_lowerCamelCase : Union[str, Any] = 3129
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Optional[Any] = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
_lowerCamelCase : Any = ViltForQuestionAnswering(_lowerCamelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Optional[Any] = ViltForImagesAndTextClassification(_lowerCamelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : Union[str, Any] = ViltForImageAndTextRetrieval(_lowerCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[int] = ViltForMaskedLM(_lowerCamelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["state_dict"]
_lowerCamelCase : str = create_rename_keys(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
if mlm_model or irtr_model:
_lowerCamelCase : Dict = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
            state_dict.pop(k , None )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_lowerCamelCase )
# Define processor
_lowerCamelCase : int = ViltImageProcessor(size=384 )
_lowerCamelCase : Union[str, Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
_lowerCamelCase : Optional[int] = ViltProcessor(_lowerCamelCase , _lowerCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCamelCase : int = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : Union[str, Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : str = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_lowerCamelCase : List[str] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Optional[int] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : int = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCamelCase : str = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=_lowerCamelCase ).raw )
if mlm_model:
_lowerCamelCase : Any = "a bunch of [MASK] laying on a [MASK]."
else:
_lowerCamelCase : List[str] = "How many cats are there?"
_lowerCamelCase : Union[str, Any] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
# Verify outputs
if mlm_model:
_lowerCamelCase : List[str] = torch.Size([1, 11, 30522] )
_lowerCamelCase : Dict = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase : List[str] = torch.Size([1, 3129] )
_lowerCamelCase : List[str] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_lowerCamelCase : Union[str, Any] = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase : List[str] = torch.Size([1, 2] )
_lowerCamelCase : Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def lowerCamelCase_( s ) -> list[str]:
    '''simple docstring'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    return [s[i:] + s[:i] for i in range(len(s ) )]
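# --- Added worked example:
#   all_rotations("^BANANA") ->
#   ['^BANANA', 'BANANA^', 'ANANA^B', 'NANA^BA', 'ANA^BAN', 'NA^BANA', 'A^BANAN']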
def lowerCamelCase_( s ) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response : BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
}
return response
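# --- Added worked example: bwt_transform("^BANANA") ->
#   {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
# i.e. the last column of the sorted rotation matrix plus the row index of the input.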
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_lowerCamelCase : Tuple = int(_lowerCamelCase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(_lowerCamelCase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_lowerCamelCase : Dict = [""] * len(_lowerCamelCase )
for _ in range(len(_lowerCamelCase ) ):
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
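# Minimal self-contained round-trip sketch of the same algorithm, kept
# independent of the functions above; "^BANANA" is the classic worked example
# and its transform ("BNN^AAA", index 6) can be checked by hand:
def _bwt_demo(s: str) -> None:
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    bwt = "".join(rotation[-1] for rotation in rotations)  # last column
    idx = rotations.index(s)  # where the original string landed after sorting
    table = [""] * len(s)
    for _ in range(len(s)):  # invert: prepend the BWT column, then re-sort
        table = sorted(bwt[i] + table[i] for i in range(len(s)))
    assert (bwt, idx, table[idx]) == ("BNN^AAA", 6, s)

_bwt_demo("^BANANA")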
if __name__ == "__main__":
_lowerCAmelCase : Any = '''Provide a string that I will generate its BWT transform: '''
_lowerCAmelCase : Optional[Any] = input(entry_msg).strip()
_lowerCAmelCase : List[str] = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
_lowerCAmelCase : str = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
| 46 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str | Literal[False]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Any = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
        if lista[i] != listb[i]:
count += 1
_lowerCamelCase : List[str] = "_"
if count > 1:
return False
else:
return "".join(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = []
while True:
_lowerCamelCase : Tuple = ["$"] * len(_lowerCamelCase )
_lowerCamelCase : str = []
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1 , len(_lowerCamelCase ) ):
_lowerCamelCase : Dict = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCamelCase : Any = "*"
_lowerCamelCase : Optional[int] = "*"
temp.append("X" )
for i in range(len(_lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCamelCase ) == 0:
return pi
_lowerCamelCase : List[Any] = list(set(_lowerCamelCase ) )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = []
for minterm in minterms:
_lowerCamelCase : List[Any] = ""
for _ in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCamelCase )
return temp
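# Self-contained sketch of the same minterm-to-binary padding, assuming
# non-negative integer minterms; format() is equivalent to the modulo loop:
def _to_binary_demo(no_of_variables: int, minterms: list[int]) -> list[str]:
    return [format(m, F"0{no_of_variables}b") for m in minterms]

assert _to_binary_demo(3, [1, 5, 7]) == ["001", "101", "111"]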
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Optional[int] = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
        if lista[i] != listb[i]:
count_n += 1
return count_n == count
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = [0] * len(_lowerCamelCase )
for i in range(len(chart[0] ) ):
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
_lowerCamelCase : Any = j
if count == 1:
_lowerCamelCase : Union[str, Any] = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
_lowerCamelCase : str = 0
_lowerCamelCase : int = -1
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = chart[i].count(1 )
if count_n > max_n:
_lowerCamelCase : Any = count_n
_lowerCamelCase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Any = 0
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[list[int]]:
'''simple docstring'''
_lowerCamelCase : str = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : List[Any] = prime_implicants[i].count("_" )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = 1
return chart
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
_lowerCamelCase : str = [
float(_lowerCamelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_lowerCamelCase : Tuple = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = check(_lowerCamelCase )
print("Prime Implicants are:" )
print(_lowerCamelCase )
_lowerCamelCase : Any = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = selection(_lowerCamelCase , _lowerCamelCase )
print("Essential Prime Implicants are:" )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A_ ( _a ):
lowerCAmelCase__ = 'Wav2Vec2FeatureExtractor'
lowerCAmelCase__ = 'AutoTokenizer'
def __init__( self: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
super().__init__(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = self.feature_extractor
_lowerCamelCase : int = False
@classmethod
def _lowercase ( cls: Dict ,__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
try:
return super().from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " ,__lowerCAmelCase ,)
_lowerCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : Dict = WavaVecaCTCTokenizer.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
return cls(feature_extractor=__lowerCAmelCase ,tokenizer=__lowerCAmelCase )
def __call__( self: str ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: str ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase ,**__lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
_lowerCamelCase : Tuple = kwargs.pop("raw_speech" )
else:
_lowerCamelCase : Any = kwargs.pop("audio" ,__lowerCAmelCase )
_lowerCamelCase : Dict = kwargs.pop("sampling_rate" ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = kwargs.pop("text" ,__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : List[Any] = args[0]
_lowerCamelCase : Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
_lowerCamelCase : List[str] = self.feature_extractor(__lowerCAmelCase ,*__lowerCAmelCase ,sampling_rate=__lowerCAmelCase ,**__lowerCAmelCase )
if text is not None:
_lowerCamelCase : Tuple = self.tokenizer(__lowerCAmelCase ,**__lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase : Optional[Any] = encodings["input_ids"]
return inputs
def _lowercase ( self: List[Any] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[str] = kwargs.pop("input_features" ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = kwargs.pop("labels" ,__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : List[str] = args[0]
_lowerCamelCase : Optional[int] = args[1:]
if input_features is not None:
_lowerCamelCase : Union[str, Any] = self.feature_extractor.pad(__lowerCAmelCase ,*__lowerCAmelCase ,**__lowerCAmelCase )
if labels is not None:
_lowerCamelCase : int = self.tokenizer.pad(__lowerCAmelCase ,**__lowerCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowerCamelCase : str = labels["input_ids"]
return input_features
def _lowercase ( self: List[Any] ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Dict ,*__lowerCAmelCase: str ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__lowerCAmelCase ,**__lowerCAmelCase )
@contextmanager
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
_lowerCamelCase : List[Any] = True
_lowerCamelCase : str = self.tokenizer
yield
_lowerCamelCase : Any = self.feature_extractor
_lowerCamelCase : List[Any] = False
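# Hedged usage sketch (the model id is illustrative); the processor pairs a
# feature extractor for audio with a CTC tokenizer for text labels:
#   from transformers import Wav2Vec2Processor
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids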
| 46 |
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: int | None = None ):
'''simple docstring'''
_lowerCamelCase : Any = value
_lowerCamelCase : Optional[int] = random()
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> tuple[Node | None, Node | None]:
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCamelCase, _lowerCamelCase : int = split(root.left , _lowerCamelCase )
return left, root
else:
_lowerCamelCase, _lowerCamelCase : Optional[int] = split(root.right , _lowerCamelCase )
return root, right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCamelCase : Any = merge(left.right , _lowerCamelCase )
return left
else:
_lowerCamelCase : Optional[Any] = merge(_lowerCamelCase , right.left )
return right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase : int = Node(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Tuple = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , value - 1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
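# Sketch of the split/merge composition that erase performs above:
#   left, rest  = split(root, value - 1)   # keys <  value
#   _,    right = split(rest, value)       # drop the subtree of keys == value
#   return merge(left, right)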
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCamelCase : Optional[Any] = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCamelCase : Optional[Any] = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : List[Any] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
_lowerCamelCase : int = input()
while args != "q":
_lowerCamelCase : List[str] = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
_lowerCamelCase : Tuple = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
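    # Hypothetical session with the prompt above (the treap is echoed after each
    # command; inorder(root) would list the kept keys in sorted order, e.g.
    # 0,1,2,3,4,5,16,17,19, after "+1 +3 +5 +17 +19 +2 +16 +4 +0" and
    # 0,1,2,4,16,17,19, after a further "-3 -5").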
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
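# Hedged note on the pattern above: at import time the module is swapped for a
# _LazyModule, so the sentencepiece/tokenizers imports only run on first
# attribute access, e.g.:
#   from transformers.models import nllb
#   tok_cls = nllb.NllbTokenizer  # triggers the real import via __getattr__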
| 46 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = SpeechTaTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = AddedToken("<mask>" ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: str=20 ,__lowerCAmelCase: List[Any]=5 ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.decode(__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
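# Hedged run sketch (the test file path is an assumption about the repo layout):
#   python -m pytest tests/models/speecht5/test_tokenization_speecht5.py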
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase : Optional[int] = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ['''PerceiverFeatureExtractor''']
_lowerCAmelCase : Optional[Any] = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
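# Hedged usage sketch of the schedulers re-exported above (for reference only,
# not to be executed inside this __init__):
#   from diffusers import DDIMScheduler
#   scheduler = DDIMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
#   scheduler.set_timesteps(50)  # choose 50 inference steps
#   scheduler.timesteps          # tensor of descending timesteps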
| 46 | 1 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : Tuple = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split("." ):
_lowerCamelCase : int = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCamelCase : str = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCamelCase : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowerCamelCase : int = value
elif weight_type == "weight_g":
_lowerCamelCase : Optional[Any] = value
elif weight_type == "weight_v":
_lowerCamelCase : Union[str, Any] = value
elif weight_type == "bias":
_lowerCamelCase : Tuple = value
else:
_lowerCamelCase : Union[str, Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : Optional[int] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : int = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : Any = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : Tuple = True
if "*" in mapped_key:
_lowerCamelCase : Any = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : List[str] = mapped_key.replace("*" , _lowerCamelCase )
if "weight_g" in name:
_lowerCamelCase : Optional[int] = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : str = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
_lowerCamelCase : Tuple = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Tuple = "weight"
else:
_lowerCamelCase : int = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Any = full_name.split("conv_layers." )[-1]
_lowerCamelCase : int = name.split("." )
_lowerCamelCase : Dict = int(items[0] )
_lowerCamelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowerCamelCase : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowerCamelCase : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase )
_lowerCamelCase : Any = WavLMConfigOrig(checkpoint["cfg"] )
_lowerCamelCase : Dict = WavLMOrig(_lowerCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
_lowerCamelCase : Optional[int] = WavLMConfig.from_pretrained(_lowerCamelCase )
else:
_lowerCamelCase : int = WavLMConfig()
_lowerCamelCase : int = WavLMModel(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase )
hf_wavlm.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
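    # Hedged usage sketch (file names are illustrative):
    #   python convert_wavlm_original_checkpoint.py \
    #       --checkpoint_path ./WavLM-Base.pt \
    #       --pytorch_dump_folder_path ./wavlm-base \
    #       --config_path ./config.json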
| 46 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
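        # For reference, the values checked above follow the DDIM posterior
        # variance (a hedged restatement of eq. 16 of the DDIM paper):
        #   var(t, t_prev) = (1 - alpha_bar_prev) / (1 - alpha_bar_t)
        #                    * (1 - alpha_bar_t / alpha_bar_prev)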
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
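        # Hedged note: batch_step_no_noise advances several (sample, timestep)
        # pairs in one flattened call, which is the building block for the
        # parallel (Picard-iteration style) sampling this scheduler supports.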
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 46 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'The column name of the images in the files.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'A folder containing the training data.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'A folder containing the validation data.'} )
lowerCAmelCase__ = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = {}
if self.train_dir is not None:
_lowerCamelCase : Optional[Any] = self.train_dir
if self.validation_dir is not None:
_lowerCamelCase : str = self.validation_dir
_lowerCamelCase : Any = data_files if data_files else None
@dataclass
class A_ :
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase__ = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class A_ ( _a ):
lowerCAmelCase__ = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : int = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
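

# Worked example of the base-learning-rate scaling above (illustrative numbers only,
# not values from any particular run): with base_learning_rate=1e-3, a per-device
# batch size of 64, gradient_accumulation_steps=2 and world_size=2, the effective
# batch is 64 * 2 * 2 = 256, so absolute_lr = 1e-3 * 256 / 256 = 1e-3.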


def _mp_fn(index):
    # For xla_spawn (TPUs); the index argument is required by the spawner.
    main()


if __name__ == "__main__":
    main()
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: List[str]=64 ,__lowerCAmelCase: Union[str, Any]=[256, 512, 1_024, 2_048] ,__lowerCAmelCase: Optional[int]=[3, 4, 6, 3] ,__lowerCAmelCase: str="preactivation" ,__lowerCAmelCase: Tuple="relu" ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase : List[Any] = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCamelCase : str = num_channels
_lowerCamelCase : str = embedding_size
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : Any = layer_type
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = global_padding
_lowerCamelCase : Tuple = num_groups
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : List[Any] = embedding_dynamic_padding
_lowerCamelCase : Any = output_stride
_lowerCamelCase : List[str] = width_factor
_lowerCamelCase : List[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(__lowerCAmelCase ) + 1 )]
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,stage_names=self.stage_names )
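

# A minimal usage sketch (hypothetical values; only the constructor above is assumed):
#
#     config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage4"])
#     print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     print(config.out_features)  # ['stage4']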
| 46 | 1 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A_ ( _a ):
lowerCAmelCase__ = 'vivit'
def __init__( self: List[Any] ,__lowerCAmelCase: int=224 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: str=[2, 16, 16] ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Any="gelu_fast" ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=1e-06 ,__lowerCAmelCase: Optional[Any]=True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = num_frames
_lowerCamelCase : Optional[int] = tubelet_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[str] = qkv_bias
super().__init__(**__lowerCAmelCase )
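

# Illustrative arithmetic for the defaults above: with image_size=224, num_frames=32
# and tubelet_size=[2, 16, 16], a video clip is cut into
# (32 / 2) * (224 / 16) * (224 / 16) = 16 * 14 * 14 = 3136 tubelet tokens, each
# embedded into a hidden_size=768 vector.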
| 46 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class A_ ( _a ):
lowerCAmelCase__ = 'gpt_neox_japanese'
def __init__( self: str ,__lowerCAmelCase: List[str]=32_000 ,__lowerCAmelCase: Tuple=2_560 ,__lowerCAmelCase: Tuple=32 ,__lowerCAmelCase: Tuple=32 ,__lowerCAmelCase: Optional[int]=4 ,__lowerCAmelCase: int="gelu" ,__lowerCAmelCase: Optional[Any]=1.00 ,__lowerCAmelCase: int=10_000 ,__lowerCAmelCase: List[str]=2_048 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: Tuple=1e-5 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=31_996 ,__lowerCAmelCase: Union[str, Any]=31_999 ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: int=0.0 ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = vocab_size
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : Tuple = hidden_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Tuple = intermediate_multiple_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : str = rotary_pct
_lowerCamelCase : Any = rotary_emb_base
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : Optional[Any] = use_cache
_lowerCamelCase : List[str] = attention_dropout
_lowerCamelCase : int = hidden_dropout
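

# Note on `intermediate_multiple_size`: this config derives the feed-forward width
# from a multiplier rather than storing it directly, so with the defaults the MLP
# dimension works out to hidden_size * intermediate_multiple_size = 2560 * 4 = 10240.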
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase, _lowerCamelCase : List[Any] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
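

# Illustrative behaviour of the character-level vocabulary built in setUp() (the ids
# follow directly from the list order): "tester" tokenizes to
# ["t", "e", "s", "t", "e", "r"], i.e. ids [31, 16, 30, 31, 16, 29].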
| 46 | 1 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCAmelCase : str = '''src/diffusers'''
_lowerCAmelCase : List[Any] = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCAmelCase : Union[str, Any] = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCAmelCase : Union[str, Any] = spec.loader.load_module()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return line.startswith(_lowerCamelCase ) or len(_lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , _lowerCamelCase ) is not None
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : Any = object_name.split("." )
_lowerCamelCase : Tuple = 0
# First let's find the module where our object lives.
_lowerCamelCase : Dict = parts[i]
while i < len(_lowerCamelCase ) and not os.path.isfile(os.path.join(_lowerCamelCase , F"""{module}.py""" ) ):
i += 1
if i < len(_lowerCamelCase ):
_lowerCamelCase : Dict = os.path.join(_lowerCamelCase , parts[i] )
if i >= len(_lowerCamelCase ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(_lowerCamelCase , F"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCamelCase : Any = f.readlines()
# Now let's find the class / func in the code!
_lowerCamelCase : Tuple = ""
_lowerCamelCase : List[str] = 0
for name in parts[i + 1 :]:
while (
line_index < len(_lowerCamelCase ) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_lowerCamelCase ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_lowerCamelCase : Optional[Any] = line_index
while line_index < len(_lowerCamelCase ) and _should_continue(lines[line_index] , _lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCamelCase : int = lines[start_index:line_index]
return "".join(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowerCAmelCase : str = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowerCAmelCase : List[Any] = re.compile(R'''<FILL\s+[^>]*>''')
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : Tuple = code.split("\n" )
_lowerCamelCase : Optional[Any] = 0
while idx < len(_lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_lowerCamelCase ):
return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = len(get_indent(_lowerCamelCase ) ) > 0
if has_indent:
_lowerCamelCase : Any = F"""class Bla:\n{code}"""
_lowerCamelCase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_lowerCamelCase )
_lowerCamelCase : List[str] = black.format_str(_lowerCamelCase , mode=_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = style_docstrings_in_code(_lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]:
'''simple docstring'''
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCamelCase : Union[str, Any] = f.readlines()
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[str] = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(_lowerCamelCase ):
_lowerCamelCase : Tuple = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = search.groups()
_lowerCamelCase : Dict = find_code_in_diffusers(_lowerCamelCase )
_lowerCamelCase : Optional[int] = get_indent(_lowerCamelCase )
_lowerCamelCase : List[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_lowerCamelCase : Any = theoretical_indent
_lowerCamelCase : Optional[Any] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_lowerCamelCase : Any = True
while line_index < len(_lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(_lowerCamelCase ):
break
_lowerCamelCase : int = lines[line_index]
_lowerCamelCase : Optional[int] = _should_continue(_lowerCamelCase , _lowerCamelCase ) and re.search(F"""^{indent}# End copy""" , _lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCamelCase : Optional[int] = lines[start_index:line_index]
_lowerCamelCase : List[Any] = "".join(_lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_lowerCamelCase : Union[str, Any] = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(_lowerCamelCase ) is None]
_lowerCamelCase : Optional[Any] = "\n".join(_lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(_lowerCamelCase ) > 0:
_lowerCamelCase : int = replace_pattern.replace("with" , "" ).split("," )
_lowerCamelCase : Dict = [_re_replace_pattern.search(_lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = pattern.groups()
_lowerCamelCase : Optional[Any] = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if option.strip() == "all-casing":
_lowerCamelCase : Dict = re.sub(obja.lower() , obja.lower() , _lowerCamelCase )
_lowerCamelCase : List[Any] = re.sub(obja.upper() , obja.upper() , _lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_lowerCamelCase : Dict = blackify(lines[start_index - 1] + theoretical_code )
_lowerCamelCase : Any = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_lowerCamelCase : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_lowerCamelCase : List[str] = start_index + 1
if overwrite and len(_lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_lowerCamelCase )
return diffs
def lowerCamelCase_( _lowerCamelCase = False ) -> Dict:
'''simple docstring'''
_lowerCamelCase : List[Any] = glob.glob(os.path.join(_lowerCamelCase , "**/*.py" ) , recursive=_lowerCamelCase )
_lowerCamelCase : Dict = []
for filename in all_files:
_lowerCamelCase : int = is_copy_consistent(_lowerCamelCase , _lowerCamelCase )
diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(_lowerCamelCase ) > 0:
_lowerCamelCase : Optional[int] = "\n".join(_lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
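

# Illustrative example of the comment convention this script enforces (the class and
# module names below are made up for the example):
#
#     # Copied from diffusers.models.attention.BasicBlock with BasicBlock->MyBlock
#     class MyBlock(nn.Module):
#         ...
#
# is_copy_consistent() re-derives MyBlock's body from BasicBlock, applies the
# BasicBlock->MyBlock substitution, and reports a diff if the two no longer match.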
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
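

# Worked example (values chosen purely for illustration): with height = width = 768 and
# scale_factor = 8, 768 // 8**2 = 12 with no remainder, so the function returns
# (12 * 8, 12 * 8) = (96, 96), the latent resolution that the MoVQ decoder later
# upsamples back to 768x768.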


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
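

# Note on the normalization above: uint8 pixels in [0, 255] are mapped through
# x / 127.5 - 1 into [-1.0, 1.0] (0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0), the value
# range the VQ image encoder expects.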


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 46 | 1 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
_lowerCAmelCase : List[str] = 300 # TEMPERATURE (unit = K)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
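

# Quick sanity check of the formula V_bi = (kT / q) * ln(Nd * Na / ni**2): at T = 300 K,
# kT / q is about 0.0259 V, so for Nd = Na = 1e17 cm^-3 and ni = 1e10 cm^-3 (typical
# silicon numbers, used purely as an illustration) the built-in voltage is roughly
# 0.0259 * ln(1e14) ~= 0.0259 * 32.2 ~= 0.83 V.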
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def lowerCamelCase_( ) -> None:
'''simple docstring'''
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def lowerCamelCase_( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p..." )
_lowerCamelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase )
print("Generating prime q..." )
_lowerCamelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase )
_lowerCamelCase : Dict = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_lowerCamelCase : Tuple = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_lowerCamelCase : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
_lowerCamelCase : Dict = (n, e)
_lowerCamelCase : Dict = (n, d)
return (public_key, private_key)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
_lowerCamelCase, _lowerCamelCase : Dict = generate_key(_lowerCamelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
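

# Why this yields a working RSA key pair: e is chosen coprime to phi = (p - 1) * (q - 1)
# and d is its modular inverse, so e * d == 1 (mod phi) and (m**e)**d == m (mod n) for
# any message m < n. A tiny self-contained check with textbook toy primes (illustration
# only; real keys must use large random primes as above):
#
#     p, q, e = 61, 53, 17              # phi = 60 * 52 = 3120, gcd(17, 3120) == 1
#     n, d = p * q, pow(17, -1, 3120)   # n = 3233, d = 2753
#     m = 65
#     assert pow(pow(m, e, n), d, n) == m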
| 46 | 1 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class A_ ( _a ):
def __init__( self: List[Any] ,__lowerCAmelCase: Dict=0.01 ,__lowerCAmelCase: Optional[Any]=1_000 ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = p_stop
_lowerCamelCase : List[str] = max_length
def __iter__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = 0
_lowerCamelCase : str = False
while not stop and count < self.max_length:
yield count
count += 1
_lowerCamelCase : Any = random.random() < self.p_stop
class A_ ( unittest.TestCase ):
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Any ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: Union[str, Any]=True ):
'''simple docstring'''
_lowerCamelCase : str = [
BatchSamplerShard(__lowerCAmelCase ,2 ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
for i in range(2 )
]
_lowerCamelCase : Optional[int] = [list(__lowerCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__lowerCAmelCase ) for shard in batch_sampler_shards] ,[len(__lowerCAmelCase ) for e in expected] )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : List[str] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : Tuple = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : str = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Tuple = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : List[str] = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : List[Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Any = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase )
_lowerCamelCase : str = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : Optional[Any] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase )
_lowerCamelCase : int = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : Tuple = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : str = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Tuple = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase )
_lowerCamelCase : List[str] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : List[str] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : Optional[int] = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but its number of
        # batches is a multiple of num_processes.
_lowerCamelCase : Optional[Any] = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : int = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and its number of
        # batches is not a multiple of num_processes.
_lowerCamelCase : List[str] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,even_batches=__lowerCAmelCase )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Tuple = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : str = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : str = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : Tuple = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase ,__lowerCAmelCase ,split_batches=__lowerCAmelCase ,even_batches=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_lowerCamelCase : Any = [BatchSamplerShard(__lowerCAmelCase ,2 ,__lowerCAmelCase ,even_batches=__lowerCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) ,3 )
self.assertEqual(len(batch_sampler_shards[1] ) ,2 )
self.assertListEqual(list(batch_sampler_shards[0] ) ,[[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) ,[[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
def _lowercase ( self: str ):
'''simple docstring'''
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
def _lowercase ( self: str ):
'''simple docstring'''
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
self.assertListEqual([t.tolist() for t in dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
self.assertListEqual([t.tolist() for t in new_dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowercase ( self: int ):
'''simple docstring'''
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
def _lowercase ( self: Tuple ):
'''simple docstring'''
Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
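

# A hedged, standalone sketch of the sharding behavior the tests above verify
# (assumes `accelerate` provides `BatchSamplerShard`, matching the import these
# tests rely on). With the default even_batches=True, the second process wraps
# around to the first indices so both processes see the same number of batches.
if __name__ == "__main__":
    from torch.utils.data import BatchSampler

    from accelerate.data_loader import BatchSamplerShard

    sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    for process_index, shard in enumerate(shards):
        print(f"process {process_index}: {list(shard)}")
    # Expected, per the tests: process 1 ends with the wrapped batch [0, 1, 2].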
| 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
| 46 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class A_ ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
def __init__( self: Optional[Any] ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Any ):
'''simple docstring'''
super().__init__(features=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _lowercase ( self: Tuple ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
import torch
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column
def _lowercase ( self: str ,__lowerCAmelCase: str ):
'''simple docstring'''
import torch
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _lowercase ( self: Tuple ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(__lowerCAmelCase ,"__array__" ) and not isinstance(__lowerCAmelCase ,torch.Tensor ):
_lowerCamelCase : str = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__lowerCAmelCase ,np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__lowerCAmelCase ) for substruct in data_struct] )
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(__lowerCAmelCase ) for substruct in data_struct] )
return self._tensorize(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: dict ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,__lowerCAmelCase ,map_list=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: pa.Table ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.numpy_arrow_extractor().extract_row(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(__lowerCAmelCase )
return self.recursive_tensorize(__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: pa.Table ):
'''simple docstring'''
_lowerCamelCase : int = self.numpy_arrow_extractor().extract_column(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.python_features_decoder.decode_column(__lowerCAmelCase ,pa_table.column_names[0] )
_lowerCamelCase : List[Any] = self.recursive_tensorize(__lowerCAmelCase )
_lowerCamelCase : List[str] = self._consolidate(__lowerCAmelCase )
return column
def _lowercase ( self: str ,__lowerCAmelCase: pa.Table ):
'''simple docstring'''
_lowerCamelCase : int = self.numpy_arrow_extractor().extract_batch(__lowerCAmelCase )
_lowerCamelCase : str = self.python_features_decoder.decode_batch(__lowerCAmelCase )
_lowerCamelCase : str = self.recursive_tensorize(__lowerCAmelCase )
for column_name in batch:
_lowerCamelCase : int = self._consolidate(batch[column_name] )
return batch
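

if __name__ == "__main__":
    # Hedged usage sketch: this formatter is normally reached through the public
    # `datasets` API rather than instantiated directly. Assumes a `datasets`
    # release where `with_format("torch")` selects the torch formatter.
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
    print(ds[0]["x"])  # tensor([1., 2.]), produced and consolidated as above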
| 46 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search over array[left:right]; return -1 if target is absent."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
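

# Non-interactive sanity check of both implementations above (the list is small
# enough that the precision cutoff routes straight to lin_search).
_example = [1, 5, 17, 23, 42, 77, 101]
assert ite_ternary_search(_example, 42) == 4
assert rec_ternary_search(0, len(_example) - 1, _example, 42) == 4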
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f'''Iterative search: {target} found at position: {result_ite}''')
        print(f'''Recursive search: {target} found at position: {result_rec}''')
    else:
        print('''Not found''')
| 46 | 1 |
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
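

if __name__ == "__main__":
    # Minimal node type and smoke test (an illustrative addition; the functions
    # above only assume objects exposing `.val` and `.next`).
    class ListNode:
        def __init__(self, val):
            self.val = val
            self.next = None

    def build_list(values):
        head = cur = ListNode(values[0])
        for v in values[1:]:
            cur.next = ListNode(v)
            cur = cur.next
        return head

    for values, expected in ([1, 2, 2, 1], True), ([1, 2, 3], False):
        assert is_palindrome(build_list(values)) is expected
        assert is_palindrome_stack(build_list(values)) is expected
        assert is_palindrome_dict(build_list(values)) is expected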
| 46 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the count of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    max_limit = n + 1  # maximum limit
    for a in range(2, max_limit):
        for b in range(2, max_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
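

# Worked check against the Project Euler 29 statement: for n = 5 there are 15
# distinct terms, because duplicates such as 2**4 == 4**2 collapse in the set.
assert solution(5) == 15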
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 46 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 50 ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 46 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ ( _a ):
lowerCAmelCase__ = 'esm'
def __init__( self: str ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: List[Any]=1_026 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Dict="absolute" ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: str=False ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,mask_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : str = use_cache
_lowerCamelCase : Union[str, Any] = emb_layer_norm_before
_lowerCamelCase : Tuple = token_dropout
_lowerCamelCase : Dict = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_lowerCamelCase : Dict = EsmFoldConfig()
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = EsmFoldConfig(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_lowerCamelCase : List[str] = get_default_vocab_list()
else:
_lowerCamelCase : Optional[Any] = vocab_list
else:
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,__lowerCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = super().to_dict()
if isinstance(self.esmfold_config ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.trunk is None:
_lowerCamelCase : Optional[int] = TrunkConfig()
elif isinstance(self.trunk ,__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = TrunkConfig(**self.trunk )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 4_8
lowerCAmelCase__ = 1_0_2_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
lowerCAmelCase__ = 4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Any ):
'''simple docstring'''
if self.structure_module is None:
_lowerCamelCase : Tuple = StructureModuleConfig()
elif isinstance(self.structure_module ,__lowerCAmelCase ):
_lowerCamelCase : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_lowerCamelCase : Optional[Any] = self.sequence_state_dim // self.sequence_head_width
_lowerCamelCase : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 3_8_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 8
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 1E-8
lowerCAmelCase__ = 1E5
def _lowercase ( self: Any ):
'''simple docstring'''
return asdict(self )
def get_default_vocab_list():
    """Return the default ESM vocabulary as a tuple of token strings."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
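

if __name__ == "__main__":
    # Hedged sketch: this module mirrors `transformers.EsmConfig`, so building a
    # folding-model config cascades through the nested dataclass defaults above.
    from transformers import EsmConfig  # assumed upstream equivalent of this file

    config = EsmConfig(is_folding_model=True, vocab_list=get_default_vocab_list())
    print(config.esmfold_config.trunk.num_blocks)  # 48, the TrunkConfig default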
| 46 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
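

# Illustrative check of the fused-qkv split performed above (a standalone
# sketch, not part of the conversion flow): a (3 * hidden, hidden) projection
# is sliced into equal query / key / value blocks that exactly cover the matrix.
def _demo_qkv_split(hidden: int = 4) -> None:
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = qkv[:hidden, :]
    k = qkv[hidden : hidden * 2, :]
    v = qkv[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)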
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT weights into the Hugging Face ViLT structure."""
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
# load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 46 |
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """
    Returns the complementary strand of the given DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
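

# Illustrative extension (not part of the original module): the reverse
# complement simply reverses the complementary strand returned by `dna` above.
def reverse_complement(strand: str) -> str:
    return dna(strand)[::-1]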
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
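

# Worked example of the q/k/v split above (illustrative, not from a real checkpoint):
# with hidden_size = 4, the fused qkv weight has shape (12, 4); rows 0-3 become the query
# projection, rows 4-7 the key projection and rows 8-11 the value projection, while the
# fused bias of length 12 is cut into three chunks of 4 in the same order.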
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
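

# Example invocation (a sketch; the script filename is an assumption):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino-vitb16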
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
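        # worked example with the defaults above: image_size=30 and patch_size=2 give
        # num_patches = (30 // 2) ** 2 = 225, and mask_ratio=0.6 gives
        # seq_length = ceil(0.4 * (225 + 1)) = 91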
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def _lowercase ( self: Any ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : int = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Tuple = model(__lowerCAmelCase ,noise=__lowerCAmelCase )
_lowerCamelCase : str = copy.deepcopy(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=__lowerCAmelCase )
_lowerCamelCase : List[str] = outputs_dict[0].numpy()
_lowerCamelCase : int = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1e-6 )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__lowerCAmelCase: Any ):
_lowerCamelCase : List[str] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = v.numpy()
else:
_lowerCamelCase : Optional[int] = np.array(__lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = prepare_numpy_arrays(__lowerCAmelCase )
_lowerCamelCase : Any = model(__lowerCAmelCase ,noise=__lowerCAmelCase )
_lowerCamelCase : Tuple = model(**__lowerCAmelCase ,noise=__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Optional[int] = tf.constant(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Tuple = tf_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__lowerCAmelCase )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(__lowerCAmelCase ,__lowerCAmelCase ),)
if isinstance(__lowerCAmelCase ,__lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowerCAmelCase ,"_keras_serializable" ,__lowerCAmelCase )
}
_lowerCamelCase : List[str] = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Optional[int] = tf.convert_to_tensor(__lowerCAmelCase )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase : Optional[Any] = main_layer_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase : Union[str, Any] = tf.keras.Model(__lowerCAmelCase ,outputs=main_layer(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Union[str, Any] = os.path.join(__lowerCAmelCase ,"keras_model.h5" )
model.save(__lowerCAmelCase )
_lowerCamelCase : str = tf.keras.models.load_model(
__lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__lowerCAmelCase ,tf.keras.Model )
_lowerCamelCase : int = model(__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase ,__lowerCAmelCase )
@slow
def _lowercase ( self: str ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase ,noise=__lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase : Optional[int] = outputs.last_hidden_state.numpy()
_lowerCamelCase : List[str] = 0
else:
_lowerCamelCase : str = outputs.logits.numpy()
_lowerCamelCase : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase ,saved_model=__lowerCAmelCase )
_lowerCamelCase : Dict = model_class.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase ,noise=__lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase : int = after_outputs["last_hidden_state"].numpy()
_lowerCamelCase : Dict = 0
else:
_lowerCamelCase : List[Any] = after_outputs["logits"].numpy()
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase ,1e-5 )
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase ,noise=__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase : Dict = model_class.from_config(model.config )
_lowerCamelCase : str = new_model(__lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase : Union[str, Any] = new_model(__lowerCAmelCase ,noise=__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase ,__lowerCAmelCase )
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
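

# Note: `softmax` subtracts the row-wise maximum before exponentiating, which keeps it
# numerically stable. Worked example (values illustrative): for logits [1000., 1001.] a naive
# exp() overflows, but exp([-1., 0.]) / sum(exp([-1., 0.])) = [0.2689, 0.7311] to 4 decimals.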
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as a default argument because we're going to use `top_k=None` in user code to declare "no top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
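

# Usage sketch (the checkpoint name is illustrative; any sequence-classification model works):
#   from transformers import pipeline
#   classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#   classifier("This is great!")  # -> [{'label': 'POSITIVE', 'score': 0.99...}]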
| 46 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
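

# Usage sketch: build a default BiT-50 style configuration and inspect the derived stage names.
#   from transformers import BitConfig, BitModel
#   configuration = BitConfig()
#   configuration.stage_names  # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   model = BitModel(configuration)  # randomly initialised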
| 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCamelCase : List[str] = bytes(_lowerCamelCase , "utf-8" )
with zstd.open(_lowerCamelCase , "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(__file__).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46 | 1 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 46 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16 in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
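

# Example (fire exposes the function arguments as CLI flags; the script filename is an assumption):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin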
if __name__ == "__main__":
fire.Fire(convert)
| 46 | 1 |
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of key.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    >>> search([], 1)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
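

# Note: this is a recursive two-ended linear search, O(n) comparisons and O(n) recursion depth;
# unlike binary search it does not require the list to be sorted.
# Example: search([9, 7, 5, 3], 3) compares indices 0 and 3 on the first call and returns 3.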
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not enabled, loading this dynamic config should fail.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import sys
import unittest
_lowerCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
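# For instance, rename_key(state_dict, "transformer.norm.weight", "vilt.layernorm.weight")
# moves one tensor to its HuggingFace name in place.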
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT weights into the HuggingFace ViLT structure."""
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan of array[left:right], used once the search window is small."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array; returns an index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two bit strings differing in at most one position into a pattern with '_'; otherwise return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly combine minterm strings until nothing changes; the survivors are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Render each minterm as a fixed-width string with one digit per variable."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if string1 and string2 differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart, then greedily cover what remains."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant marks that implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # take the essential implicants and zero out the columns they cover
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily add the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff prime implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
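# Minimal sketch of driving the helpers above directly (hypothetical values; note
# that main() reads minterms as floats, so the generated strings contain ".0"):
#     binary = decimal_to_binary(3, [1.0, 5.0])
#     prime_implicants = check(binary)
#     chart = prime_implicant_chart(prime_implicants, binary)
#     essential = selection(chart, prime_implicants)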
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """Treap's node: a treap is a binary tree by value and a heap by priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root goes into the right tree; split its left subtree
            left, root.left = split(root.left, value)
            return left, root
        else:
            # root goes into the left tree; split its right subtree
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` precedes every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert value: split by value, then merge left part, new node, and right part."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes with the given value: split them out, then merge the rest."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply commands like "+5" (insert 5) or "-5" (erase 5) to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
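# Example session: the inorder traversal of a treap always prints values in sorted
# order, whatever random priorities were drawn:
#     root = interact_treap(None, "+1 +3 +5 +17 +19 +2 +16 +4 +0")
#     inorder(root)  # prints 0,1,2,3,4,5,16,17,19,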
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
_lowerCAmelCase : str = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
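# Typical invocation (hypothetical paths; the script file name is illustrative):
#     python convert_openai_original_tf_checkpoint_to_pytorch.py \
#         --openai_checkpoint_folder_path ./openai-gpt-weights \
#         --pytorch_dump_folder_path ./openai-gpt-pytorch
# Leaving --openai_config_file empty falls back to the default OpenAIGPTConfig, as above.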
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequences because this tokenizer does not handle numbers.
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _lowercase ( self: int ):
'''simple docstring'''
pass
def _lowercase ( self: List[str] ):
'''simple docstring'''
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 46 |
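For reference, a minimal standalone sketch of the two masks the LED helpers above construct: a padding attention_mask derived from pad-token positions, and a global_attention_mask that flags only the last position. The pad_token_id and toy input_ids here are assumed values, not taken from the test file.

import tensorflow as tf

pad_token_id = 1  # assumed pad id for this sketch
input_ids = tf.constant([[0, 5, 7, 1, 1], [0, 9, 4, 6, 1]])

# 1 where a token is real, 0 where it is padding -- the same rule
# prepare_led_inputs_dict applies when no mask is passed in.
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)

# LED additionally marks a few positions as "global"; here only the final
# token per row, mirroring how the tester builds its global_attention_mask.
global_attention_mask = tf.concat(
    [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1
)

print(attention_mask.numpy())
print(global_attention_mask.numpy())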
"""simple docstring"""
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 46 | 1 |
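The module above guards every import behind a backend check so that a missing optional dependency fails only when the class is actually used. A minimal sketch of that pattern, where `_DummyDDIMScheduler` is a hypothetical placeholder standing in for diffusers' generated dummy objects:

import importlib.util


def is_available(package: str) -> bool:
    """True when `package` can be imported in this environment."""
    return importlib.util.find_spec(package) is not None


class _DummyDDIMScheduler:
    """Stand-in that only fails when someone actually tries to instantiate it."""

    def __init__(self, *args, **kwargs):
        raise ImportError("DDIMScheduler requires `torch`; install it to use this class.")


if is_available("torch"):
    # the real import would go here, e.g. `from .scheduling_ddim import DDIMScheduler`
    DDIMScheduler = type("DDIMScheduler", (), {})  # placeholder for this sketch
else:
    DDIMScheduler = _DummyDDIMScheduler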
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit cannot be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight cannot be negative.")

    # Profit gained per 1 kg of each item: calculate profit/weight
    # for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # A sorted copy of the ratios, ascending, so the best ratio sits at the end.
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # Loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length.
    while limit <= max_weight and i < length:
        # Greatest remaining profit/weight ratio.
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # Mark this ratio as used (-1 is impossible for non-negative inputs),
        # so .index() does not find the same element again.
        profit_by_weight[index] = -1

        # Check whether the whole item still fits under the remaining weight limit.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # The full item fits, so the full profit is gained:
            # 1 == weight[index] / weight[index].
            gain += 1 * profit[index]
        else:
            # The item does not fit entirely; take only the fraction that fits:
            # (weight remaining) / weight[index] of the item and of its profit.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
| 46 |
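For comparison, a compact sketch of the same greedy fractional-knapsack strategy that sorts items by profit density once, instead of repeatedly searching with .index(); the 60/100/120 values are the classic textbook instance, not data from this file:

def fractional_knapsack(profit, weight, max_weight):
    """Greedy fractional knapsack: take items in decreasing profit/weight order."""
    # Pair each item with its density and visit the most profitable per kg first.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    remaining = max_weight
    gain = 0.0
    for p, w in items:
        if w <= remaining:  # the whole item fits
            remaining -= w
            gain += p
        else:  # take only the fraction that still fits, then stop
            gain += p * remaining / w
            break
    return gain


assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0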
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 46 | 1 |
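The test_variance assertions above probe the closed-form DDIM posterior variance. Below is a sketch of that formula under the linear beta schedule from get_scheduler_config; the exact value depends on how the real scheduler constructs its schedule, so treat the comparison with the asserted 0.14771 as approximate rather than definitive:

import torch

# betas and cumulative alphas for the "linear" schedule configured above
betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)


def ddim_variance(t, prev_t):
    """sigma_t^2 = (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev), a = alphas_cumprod."""
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)


print(float(ddim_variance(420, 400)))  # should land near the 0.14771 asserted above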
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, values = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 46 |
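A simplified sketch of the same deprecation pattern reduced to a single keyword argument. `pop_deprecated` is a hypothetical helper, not diffusers API, and it assumes the `packaging` package is installed:

import warnings

from packaging import version


def pop_deprecated(kwargs, name, removed_in, current_version):
    """Warn and pop a deprecated kwarg; fail loudly once the removal version ships."""
    if version.parse(current_version) >= version.parse(removed_in):
        # at this point the shim itself is stale and should have been deleted
        raise ValueError(f"the `{name}` deprecation should have been removed in {removed_in}")
    if name in kwargs:
        warnings.warn(
            f"`{name}` is deprecated and will be removed in version {removed_in}.",
            FutureWarning,
            stacklevel=2,
        )
        return kwargs.pop(name)
    return None


kwargs = {"eta": 0.3}
eta = pop_deprecated(kwargs, "eta", removed_in="1.0.0", current_version="0.19.0")
print(eta, kwargs)  # 0.3 {}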
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 46 | 1 |
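A simplified sketch of the reconciliation that get_aligned_output_features_output_indices performs for backbone configs: whichever of out_features/out_indices is missing is derived from the other via stage_names. The real transformers utility also validates that the two selections agree, which this sketch omits:

def align_output_features_indices(out_features, out_indices, stage_names):
    """Fill in whichever of the two stage selectors is missing, keeping them consistent."""
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default: expose only the last stage
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices


names = ["stem", "stage1", "stage2", "stage3", "stage4"]
print(align_output_features_indices(None, [1, 3], names))  # (['stage1', 'stage3'], [1, 3])
print(align_output_features_indices(None, None, names))    # (['stage4'], [4])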