"""simple docstring"""
import string
import numpy
def a__ ( snake_case__ , snake_case__ ) -> int:
return b if a == 0 else greatest_common_divisor(b % a , snake_case__ )
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
__UpperCamelCase = numpy.vectorize(lambda UpperCAmelCase__ : x % 36 )
__UpperCamelCase = numpy.vectorize(UpperCAmelCase__ )
def __init__( self , _a ):
"""simple docstring"""
lowerCamelCase = self.modulus(_a ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowerCamelCase = encrypt_key.shape[0]
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.key_string.index(_a )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.key_string[round(_a )]
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCamelCase = det % len(self.key_string )
lowerCamelCase = len(self.key_string )
if greatest_common_divisor(_a , len(self.key_string ) ) != 1:
lowerCamelCase = (
f'determinant modular {req_l} of encryption key({det}) '
f'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(_a )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = [char for char in text.upper() if char in self.key_string]
lowerCamelCase = chars[-1]
while len(_a ) % self.break_key != 0:
chars.append(_a )
return "".join(_a )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = self.process_text(text.upper() )
lowerCamelCase = """"""
for i in range(0 , len(_a ) - self.break_key + 1 , self.break_key ):
lowerCamelCase = text[i : i + self.break_key]
lowerCamelCase = [self.replace_letters(_a ) for char in batch]
lowerCamelCase = numpy.array([vec] ).T
lowerCamelCase = self.modulus(self.encrypt_key.dot(_a ) ).T.tolist()[
0
]
lowerCamelCase = """""".join(
self.replace_digits(_a ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCamelCase = det % len(self.key_string )
lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowerCamelCase = i
break
lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_a ) )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = self.make_decrypt_key()
lowerCamelCase = self.process_text(text.upper() )
lowerCamelCase = """"""
for i in range(0 , len(_a ) - self.break_key + 1 , self.break_key ):
lowerCamelCase = text[i : i + self.break_key]
lowerCamelCase = [self.replace_letters(_a ) for char in batch]
lowerCamelCase = numpy.array([vec] ).T
lowerCamelCase = self.modulus(decrypt_key.dot(_a ) ).T.tolist()[0]
lowerCamelCase = """""".join(
self.replace_digits(_a ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def a__ ( ) -> None:
lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(snake_case__ ):
lowerCamelCase = [int(snake_case__ ) for x in input().split()]
hill_matrix.append(snake_case__ )
lowerCamelCase = HillCipher(numpy.array(snake_case__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(snake_case__ ) )
elif option == "2":
lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
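
# Illustrative round-trip sketch (not part of the original module): the 2x2 demo
# key below was chosen for this example only; its determinant is 7, which is
# coprime with 36, so it passes check_determinant().
if __name__ == "__main__":
    demo_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    ciphertext = demo_cipher.encrypt("HELLOWORLD")
    assert demo_cipher.decrypt(ciphertext) == "HELLOWORLD"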
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ):
"""simple docstring"""
super().__init__(
feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , )
lowerCamelCase = spectrogram_length
lowerCamelCase = num_channels
lowerCamelCase = patch_size
lowerCamelCase = feature_size // self.patch_size[1]
lowerCamelCase = n_fft
lowerCamelCase = sampling_rate // hop_length_to_sampling_rate
lowerCamelCase = sampling_rate
lowerCamelCase = padding_value
lowerCamelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = spectrogram(
_a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
lowerCamelCase = log_spec[:, :-1]
lowerCamelCase = log_spec - 20.0
lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
lowerCamelCase = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _a ):
lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCamelCase = np.array(_a ).astype(np.floataa )
# convert into correct format for padding
lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCamelCase = padded_audio_features * self.padding_value
for i in range(len(_a ) ):
lowerCamelCase = audio_features[i]
lowerCamelCase = feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
lowerCamelCase = {"""audio_values""": padded_audio_features}
lowerCamelCase = BatchFeature(data=_a , tensor_type=_a )
return encoded_inputs

import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Contains the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the message so its length is a multiple of 64 bytes."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into 80 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    """Check the implementation against hashlib's reference SHA-1."""
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    """Hash a string (default) or the contents of a file given on the command line."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
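
# Quick agreement check (illustrative, not part of the original file): the
# pure-Python implementation above should match hashlib.sha1 on any byte string.
if __name__ == "__main__":
    sample = b"The quick brown fox jumps over the lazy dog"
    assert SHA1Hash(sample).final_hash() == hashlib.sha1(sample).hexdigest()  # noqa: S324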

import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()

from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass


test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order so the head ends up being the smallest value.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """
    >>> SSL = SortedLinkedList
    >>> merged = merge_lists(SSL(test_data_odd), SSL(test_data_even))
    >>> len(merged)
    16
    >>> str(merged)
    '-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10'
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))

"""
Project Euler Problem 46: Goldbach's other conjecture.
https://projecteuler.net/problem=46

Find the smallest odd composite that cannot be written as the sum of a prime
and twice a square.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composite numbers which do not follow the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")

def heaps(arr: list) -> list:
    """
    Pure Python implementation of Heap's algorithm, returning all
    permutations of a list as tuples.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
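
# Cross-check sketch (illustrative, not from the original file): Heap's
# algorithm must generate the same multiset of permutations as
# itertools.permutations, only in a different order.
# >>> import itertools
# >>> sorted(heaps([1, 2, 3])) == sorted(itertools.permutations([1, 2, 3]))
# True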

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
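
# Minimal usage sketch (illustrative; assumes this module is importable as part
# of the transformers package, so the relative imports above resolve): build a
# small config and inspect the X-MOD adapter fields layered on top of the
# RoBERTa-style settings.
if __name__ == "__main__":
    demo_config = XmodConfig(num_hidden_layers=2, languages=("en_XX", "de_DE"))
    print(demo_config.adapter_reduction_factor, demo_config.languages)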
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_SCREAMING_SNAKE_CASE : Any = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_SCREAMING_SNAKE_CASE : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowerCamelCase__ ( _lowerCamelCase : str ) -> str:
if "://" in dataset_path:
lowerCamelCase_ = dataset_path.split('://' )[1]
return dataset_path
def lowerCamelCase__ ( _lowerCamelCase : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowerCamelCase__ ( _lowerCamelCase : fsspec.AbstractFileSystem , _lowerCamelCase : str , _lowerCamelCase : str ) -> int:
lowerCamelCase_ = not is_remote_filesystem(_lowerCamelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_lowerCamelCase ) , fs._strip_protocol(_lowerCamelCase ) )
else:
fs.mv(_lowerCamelCase , _lowerCamelCase , recursive=_lowerCamelCase )
def lowerCamelCase__ ( ) -> None:
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = threading.Lock()
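
# Usage sketch (hypothetical paths, for illustration only):
if __name__ == "__main__":
    local_fs = fsspec.filesystem("file")
    print(extract_path_from_uri("s3://my-bucket/datasets/squad"))  # my-bucket/datasets/squad
    print(is_remote_filesystem(local_fs))  # False: "file" is the local protocol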

import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # A root is an articulation point only if it has more than one outgoing DFS edge.
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
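# For the adjacency list above, vertices 2, 3 and 5 are articulation points:
# removing any one of them disconnects the graph, so compute_ap(data) prints 2, 3, 5.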

import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
snake_case : Dict = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
snake_case : Union[str, Any] = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def lowerCAmelCase_ ( _snake_case : str ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : List[str] = set()
__magic_name__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : int = char
__magic_name__ : List[str] = set(_snake_case )
return pairs
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , **_a , ):
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
__magic_name__ : Dict = vocab_file
__magic_name__ : Tuple = merges_file
__magic_name__ : List[Any] = {}
__magic_name__ : List[Any] = 0
__magic_name__ : Tuple = 1
__magic_name__ : int = 2
__magic_name__ : Union[str, Any] = 3
self.add_from_file(_a )
__magic_name__ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(_a , encoding="utf-8" ) as merges_handle:
__magic_name__ : List[str] = merges_handle.read().split("\n" )[:-1]
__magic_name__ : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
__magic_name__ : Union[str, Any] = dict(zip(_a , range(len(_a ) ) ) )
__magic_name__ : Optional[int] = {}
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : Optional[Any] = [self.cls_token_id]
__magic_name__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Optional[Any] = [self.sep_token_id]
__magic_name__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , _a ):
if token in self.cache:
return self.cache[token]
__magic_name__ : List[Any] = tuple(_a )
__magic_name__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
__magic_name__ : Any = get_pairs(_a )
if not pairs:
return token
while True:
__magic_name__ : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ , __magic_name__ : List[str] = bigram
__magic_name__ : List[str] = []
__magic_name__ : List[str] = 0
while i < len(_a ):
try:
__magic_name__ : Any = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : Union[str, Any] = tuple(_a )
__magic_name__ : Optional[int] = new_word
if len(_a ) == 1:
break
else:
__magic_name__ : List[Any] = get_pairs(_a )
__magic_name__ : Optional[int] = "@@ ".join(_a )
__magic_name__ : Tuple = word[:-4]
__magic_name__ : str = word
return word
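    # Sketch of the loop above (merge ranks are model-dependent, so this is
    # illustrative only): "hello" starts as ("h", "e", "l", "l", "o</w>"); each
    # pass merges the best-ranked adjacent pair until no ranked pair remains,
    # then the units are joined with "@@ " and the trailing "</w>" marker
    # (4 characters) is stripped by word[:-4].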
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = []
__magic_name__ : Dict = re.findall(r"\S+\n?" , _a )
for token in words:
split_tokens.extend(list(self.bpe(_a ).split(" " ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.decoder.get(_a , self.unk_token )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Tuple = " ".join(_a ).replace("@@ " , "" ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : Optional[int] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__magic_name__ : Union[str, Any] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
copyfile(self.merges_file , _a )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE ( self , _a ):
if isinstance(_a , _a ):
try:
with open(_a , "r" , encoding="utf-8" ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
__magic_name__ : List[Any] = f.readlines()
for lineTmp in lines:
__magic_name__ : Optional[Any] = lineTmp.strip()
__magic_name__ : Union[str, Any] = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
__magic_name__ : Optional[int] = line[:idx]
__magic_name__ : Dict = len(self.encoder )
| 281 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__lowercase = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
__lowercase = {
'''camembert-base''': 512,
}
__lowercase = '''▁'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Tuple = VOCAB_FILES_NAMES
a__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = ["""input_ids""", """attention_mask"""]
def __init__( self , __lowercase , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=["<s>NOTUSED", "</s>NOTUSED"] , __lowercase = None , **__lowercase , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
__UpperCamelCase :Union[str, Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else mask_token
__UpperCamelCase :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
__UpperCamelCase :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__lowercase))
__UpperCamelCase :Optional[int] = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
__UpperCamelCase :Optional[int] = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
__UpperCamelCase :Optional[Any] = len(self.fairseq_tokens_to_ids)
__UpperCamelCase :Union[str, Any] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
__UpperCamelCase :int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase :Optional[Any] = [self.cls_token_id]
__UpperCamelCase :Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase)
if token_ids_a is None:
return [1] + ([0] * len(__lowercase)) + [1]
return [1] + ([0] * len(__lowercase)) + [1, 1] + ([0] * len(__lowercase)) + [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :List[str] = [self.sep_token_id]
__UpperCamelCase :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def UpperCamelCase__ ( self) -> List[Any]:
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = {self.convert_ids_to_tokens(__lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
return self.sp_model.encode(__lowercase , out_type=__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__lowercase) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
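    # Id layout assumed by the two converters above: ids 0-3 are the fairseq
    # specials hard-coded in __init__, every sentencepiece piece id p maps to
    # p + fairseq_offset, and sentencepiece's own unknown piece (id 0) is
    # redirected to this tokenizer's unk_token_id.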
def UpperCamelCase__ ( self , __lowercase) -> str:
__UpperCamelCase :Dict = []
__UpperCamelCase :Dict = ''''''
__UpperCamelCase :Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowercase) + token
__UpperCamelCase :Union[str, Any] = True
__UpperCamelCase :str = []
else:
current_sub_tokens.append(__lowercase)
__UpperCamelCase :Any = False
out_string += self.sp_model.decode(__lowercase)
return out_string.strip()
def __getstate__( self) -> Optional[int]:
__UpperCamelCase :Any = self.__dict__.copy()
__UpperCamelCase :List[Any] = None
return state
def __setstate__( self , __lowercase) -> Optional[int]:
__UpperCamelCase :List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
__UpperCamelCase :Optional[Any] = {}
__UpperCamelCase :Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
if not os.path.isdir(__lowercase):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
__UpperCamelCase :Tuple = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __lowercase)
elif not os.path.isfile(self.vocab_file):
with open(__lowercase , '''wb''') as fi:
__UpperCamelCase :List[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowercase)
return (out_vocab_file,)
| 105 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = '''▁'''
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : int = BertGenerationTokenizer
a__ : Dict = False
a__ : str = True
def UpperCamelCase__ ( self) -> List[str]:
super().setUp()
__UpperCamelCase :Optional[Any] = BertGenerationTokenizer(__lowercase , keep_accents=__lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Dict = '''<s>'''
__UpperCamelCase :Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase) , __lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase) , __lowercase)
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''<pad>''')
self.assertEqual(len(__lowercase) , 1_002)
def UpperCamelCase__ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_000)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :str = BertGenerationTokenizer(__lowercase , keep_accents=__lowercase)
__UpperCamelCase :Optional[Any] = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase) , [285, 46, 10, 170, 382] , )
__UpperCamelCase :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__UpperCamelCase :Any = tokenizer.convert_tokens_to_ids(__lowercase)
self.assertListEqual(
__lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__UpperCamelCase :Tuple = tokenizer.convert_ids_to_tokens(__lowercase)
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase__ ( self) -> int:
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''')
@slow
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = '''Hello World!'''
__UpperCamelCase :Optional[int] = [18_536, 2_260, 101]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase))
@slow
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
__UpperCamelCase :Union[str, Any] = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase))
@require_torch
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__UpperCamelCase :Optional[Any] = list(self.big_tokenizer.get_vocab().keys())[:10]
__UpperCamelCase :Optional[int] = ''' '''.join(__lowercase)
__UpperCamelCase :Optional[int] = self.big_tokenizer.encode_plus(__lowercase , return_tensors='''pt''' , return_token_type_ids=__lowercase)
__UpperCamelCase :List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__lowercase)
__UpperCamelCase :List[Any] = BertGenerationConfig()
__UpperCamelCase :Optional[Any] = BertGenerationEncoder(__lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowercase)
model(**__lowercase)
@slow
def UpperCamelCase__ ( self) -> Dict:
# fmt: off
__UpperCamelCase :List[str] = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 105 | 1 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ) -> List[str]:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
UpperCAmelCase : List[str] = TapasConfig.from_json_file(_lowerCAmelCase )
# set absolute/relative position embeddings parameter
UpperCAmelCase : Optional[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
UpperCAmelCase : Any = TapasForQuestionAnswering(config=_lowerCAmelCase )
elif task == "WTQ":
# run_task_main.py hparams
UpperCAmelCase : int = 4
UpperCAmelCase : int = True
# hparam_utils.py hparams
UpperCAmelCase : Union[str, Any] = 0.6_6_4_6_9_4
UpperCAmelCase : Tuple = 0.2_0_7_9_5_1
UpperCAmelCase : Dict = 0.1_2_1_1_9_4
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = True
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Tuple = 0.0_3_5_2_5_1_3
UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=_lowerCAmelCase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Tuple = False
# hparam_utils.py hparams
UpperCAmelCase : Union[str, Any] = 3_6.4_5_1_9
UpperCAmelCase : Optional[Any] = 0.9_0_3_4_2_1
UpperCAmelCase : Dict = 2_2_2.0_8_8
UpperCAmelCase : int = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : Any = 0.7_6_3_1_4_1
UpperCAmelCase : Tuple = TapasForQuestionAnswering(config=_lowerCAmelCase )
elif task == "TABFACT":
UpperCAmelCase : List[str] = TapasForSequenceClassification(config=_lowerCAmelCase )
elif task == "MLM":
UpperCAmelCase : List[str] = TapasForMaskedLM(config=_lowerCAmelCase )
elif task == "INTERMEDIATE_PRETRAINING":
UpperCAmelCase : List[Any] = TapasModel(config=_lowerCAmelCase )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowerCAmelCase )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
UpperCAmelCase : Dict = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
tokenizer.save_pretrained(_lowerCAmelCase )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
UpperCamelCase__: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase__: Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
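# Example invocation (hypothetical paths and script name; the flags are the
# ones registered above):
#   python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output_dir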
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 0 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
a_ = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def a__ ( __lowercase ) -> int:
if isinstance(__lowercase , torch.Tensor ):
return image
elif isinstance(__lowercase , PIL.Image.Image ):
_A = [image]
_A = [trans(img.convert("RGB" ) ) for img in image]
_A = torch.stack(__lowercase )
return image
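# Sketch of what `preprocess` yields (assuming a Pillow image as input): a
# single RGB PIL image is resized to 256x256, converted to a tensor, normalized
# to [-1, 1], and stacked into a batch of shape (1, 3, 256, 256); a tensor
# input is passed through unchanged.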
class snake_case ( _UpperCamelCase):
def __init__( self : Any , a__ : Optional[int] , a__ : Optional[Any] ) -> Dict:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_A = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=a__ , scheduler=a__ )
def a_ ( self : int , a__ : str ) -> Any:
'''simple docstring'''
if strength < 0 or strength > 1:
            raise ValueError(F"""The value of strength should be in [0.0, 1.0] but is {strength}""" )
def a_ ( self : Tuple , a__ : Any , a__ : Optional[int] , a__ : List[Any] ) -> Any:
'''simple docstring'''
_A = min(int(num_inference_steps * strength ) , a__ )
_A = max(num_inference_steps - init_timestep , 0 )
_A = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
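    # e.g. with num_inference_steps=50 and strength=0.8: init_timestep = 40,
    # t_start = 10, so denoising runs over the final 40 scheduler timesteps;
    # strength=1.0 noises the input fully and uses every timestep.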
def a_ ( self : Optional[int] , a__ : str , a__ : Optional[Any] , a__ : Tuple , a__ : Dict , a__ : Any , a__ : int=None ) -> Dict:
'''simple docstring'''
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}""" )
_A = image.to(device=a__ , dtype=a__ )
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(a__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_A = init_latents.shape
_A = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
print("add noise to latents at timestep" , a__ )
_A = self.scheduler.add_noise(a__ , a__ , a__ )
_A = init_latents
return latents
@torch.no_grad()
def __call__( self : Optional[Any] , a__ : Union[torch.FloatTensor, PIL.Image.Image] = None , a__ : float = 0.8 , a__ : int = 1 , a__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a__ : float = 0.0 , a__ : int = 50 , a__ : Optional[bool] = None , a__ : Optional[str] = "pil" , a__ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(a__ )
# 2. Preprocess image
_A = preprocess(a__ )
# 3. set timesteps
self.scheduler.set_timesteps(a__ , device=self.device )
_A , _A = self.get_timesteps(a__ , a__ , self.device )
_A = timesteps[:1].repeat(a__ )
# 4. Prepare latent variables
_A = self.prepare_latents(a__ , a__ , a__ , self.unet.dtype , self.device , a__ )
_A = latents
# 5. Denoising loop
for t in self.progress_bar(a__ ):
# 1. predict noise model_output
_A = self.unet(a__ , a__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_A = self.scheduler.step(
a__ , a__ , a__ , eta=a__ , use_clipped_model_output=a__ , generator=a__ , ).prev_sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A = self.numpy_to_pil(a__ )
if not return_dict:
return (image, latent_timestep.item())
        return ImagePipelineOutput(images=a__ )
| 357 |
"""simple docstring"""
def a__ ( __lowercase=2_8123 ) -> List[Any]:
_A = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
_A = set()
_A = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(__lowercase )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
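# Worked check of the sieve above: sum_divs[12] accumulates 1 (initial value)
# + (2 + 6) + (3 + 4) = 16 > 12, so 12 is abundant and 24 = 12 + 12 is the
# smallest sum of two abundant numbers; every integer above 28123 is known to
# be such a sum, hence the default limit.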
if __name__ == "__main__":
    print(solution())
| 163 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 245 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowercase ( _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE : List[str] = torch.exp(_A )
SCREAMING_SNAKE_CASE : List[str] = torch.sum(_A , dim=1 ) # sum of exp(x_i)
SCREAMING_SNAKE_CASE : Dict = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_A ) - B / A
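# The closed form above is the Shannon entropy of softmax(x): with
# p_i = exp(x_i) / A, A = sum_j exp(x_j) and B = sum_i x_i * exp(x_i),
#   H = -sum_i p_i * log(p_i) = log(A) - B / A,
# which is exactly the log(A) - B / A returned here.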
class a__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase__ : int ) ->List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = config.output_attentions
SCREAMING_SNAKE_CASE : Any = config.output_hidden_states
SCREAMING_SNAKE_CASE : str = nn.ModuleList([BertLayer(UpperCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
SCREAMING_SNAKE_CASE : str = nn.ModuleList([BertHighway(UpperCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
SCREAMING_SNAKE_CASE : Union[str, Any] = [-1 for _ in range(config.num_hidden_layers )]
def _lowercase ( self : str , UpperCAmelCase__ : str ) ->Dict:
"""simple docstring"""
if (type(UpperCAmelCase__ ) is float) or (type(UpperCAmelCase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
SCREAMING_SNAKE_CASE : Tuple = x
else:
SCREAMING_SNAKE_CASE : Optional[Any] = x
def _lowercase ( self : str , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Tuple=None , ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = ()
SCREAMING_SNAKE_CASE : Dict = ()
SCREAMING_SNAKE_CASE : str = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
SCREAMING_SNAKE_CASE : Union[str, Any] = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE : int = layer_module(
UpperCAmelCase__ , UpperCAmelCase__ , head_mask[i] , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = layer_outputs[0]
if self.output_attentions:
SCREAMING_SNAKE_CASE : Dict = all_attentions + (layer_outputs[1],)
SCREAMING_SNAKE_CASE : Optional[int] = (hidden_states,)
if self.output_hidden_states:
SCREAMING_SNAKE_CASE : int = current_outputs + (all_hidden_states,)
if self.output_attentions:
SCREAMING_SNAKE_CASE : int = current_outputs + (all_attentions,)
SCREAMING_SNAKE_CASE : Dict = self.highway[i](UpperCAmelCase__ )
# logits, pooled_output
if not self.training:
SCREAMING_SNAKE_CASE : str = highway_exit[0]
SCREAMING_SNAKE_CASE : Dict = entropy(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
SCREAMING_SNAKE_CASE : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
SCREAMING_SNAKE_CASE : Dict = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCAmelCase__ , i + 1 )
else:
SCREAMING_SNAKE_CASE : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
SCREAMING_SNAKE_CASE : List[str] = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE : Optional[int] = (hidden_states,)
if self.output_hidden_states:
SCREAMING_SNAKE_CASE : Optional[int] = outputs + (all_hidden_states,)
if self.output_attentions:
SCREAMING_SNAKE_CASE : Optional[int] = outputs + (all_attentions,)
SCREAMING_SNAKE_CASE : Any = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , UpperCAmelCase , )
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple ) ->List[str]:
"""simple docstring"""
super().__init__(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = config
SCREAMING_SNAKE_CASE : Union[str, Any] = BertEmbeddings(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = DeeBertEncoder(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = BertPooler(UpperCAmelCase__ )
self.init_weights()
def _lowercase ( self : str ) ->Optional[int]:
"""simple docstring"""
self.encoder.init_highway_pooler(self.pooler )
def _lowercase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
return self.embeddings.word_embeddings
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = value
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Dict ) ->str:
"""simple docstring"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCAmelCase__ )
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def _lowercase ( self : Dict , UpperCAmelCase__ : str=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Dict=None , ) ->int:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
SCREAMING_SNAKE_CASE : str = input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
SCREAMING_SNAKE_CASE : Any = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE : List[str] = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if token_type_ids is None:
SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
SCREAMING_SNAKE_CASE : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
SCREAMING_SNAKE_CASE : Optional[int] = encoder_attention_mask[:, None, None, :]
SCREAMING_SNAKE_CASE : Optional[int] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
SCREAMING_SNAKE_CASE : str = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE : str = self.get_head_mask(UpperCAmelCase__ , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE : str = self.embeddings(
input_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.encoder(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : int = encoder_outputs[0]
SCREAMING_SNAKE_CASE : int = self.pooler(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = message
SCREAMING_SNAKE_CASE : str = exit_layer # start from 1!
class a__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase__ : Any ) ->List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = BertPooler(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE : Tuple = nn.Linear(config.hidden_size , config.num_labels )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Tuple ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = encoder_outputs[0]
SCREAMING_SNAKE_CASE : int = self.pooler(UpperCAmelCase__ )
# "return" pooler_output
# BertModel
SCREAMING_SNAKE_CASE : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
SCREAMING_SNAKE_CASE : List[Any] = bmodel_output[1]
SCREAMING_SNAKE_CASE : Any = self.dropout(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = self.classifier(UpperCAmelCase__ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , UpperCAmelCase , )
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) ->int:
"""simple docstring"""
super().__init__(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = config.num_labels
SCREAMING_SNAKE_CASE : int = config.num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = DeeBertModel(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=-1 , UpperCAmelCase__ : List[str]=False , ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_layers
try:
SCREAMING_SNAKE_CASE : str = self.bert(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
SCREAMING_SNAKE_CASE : Optional[Any] = outputs[1]
SCREAMING_SNAKE_CASE : Any = self.dropout(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self.classifier(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
SCREAMING_SNAKE_CASE : Optional[int] = e.message
SCREAMING_SNAKE_CASE : Optional[Any] = e.exit_layer
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
if not self.training:
SCREAMING_SNAKE_CASE : Optional[Any] = entropy(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Dict = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE : List[str] = MSELoss()
SCREAMING_SNAKE_CASE : str = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : Tuple = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
SCREAMING_SNAKE_CASE : List[Any] = []
for highway_exit in outputs[-1]:
SCREAMING_SNAKE_CASE : Optional[int] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCAmelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE : Any = MSELoss()
SCREAMING_SNAKE_CASE : List[str] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : Any = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCAmelCase__ )
if train_highway:
SCREAMING_SNAKE_CASE : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
SCREAMING_SNAKE_CASE : int = (loss,) + outputs
if not self.training:
SCREAMING_SNAKE_CASE : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
SCREAMING_SNAKE_CASE : List[str] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 245 | 1 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if a < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError('Input value must be a \'int\' type' )
return bin(lowerCAmelCase ).count('1' )
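# e.g. 25 is 0b11001, so the popcount returned for 25 is 3.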
if __name__ == "__main__":
import doctest
doctest.testmod()
| 260 |
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int )-> int:
while a != 0:
_snake_case , _snake_case : Optional[Any] = b % a, a
return b
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int )-> int:
if gcd(lowerCAmelCase , lowerCAmelCase ) != 1:
_snake_case : Any = F"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(lowerCAmelCase )
_snake_case , _snake_case , _snake_case : Optional[Any] = 1, 0, a
_snake_case , _snake_case , _snake_case : Optional[int] = 0, 1, m
while va != 0:
_snake_case : Dict = ua // va
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
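# Worked example of the extended-Euclid loop above: the inverse of 7 mod 26 is
# 15, since 7 * 15 = 105 = 4 * 26 + 1, i.e. (7 * 15) % 26 == 1.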
| 260 | 1 |
"""simple docstring"""
import random
from typing import Any
def _lowercase ( __snake_case ) -> list[Any]:
for _ in range(len(__snake_case ) ):
__lowerCAmelCase : int = random.randint(0 ,len(__snake_case ) - 1 )
__lowerCAmelCase : Optional[int] = random.randint(0 ,len(__snake_case ) - 1 )
__lowerCAmelCase , __lowerCAmelCase : Dict = data[b], data[a]
return data
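# Note: this variant draws two independent random indices per pass, which is
# simpler than the textbook Fisher-Yates sweep (swap position i with a random
# j <= i) and does not guarantee a uniform distribution over permutations.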
if __name__ == "__main__":
__snake_case : Tuple = [0, 1, 2, 3, 4, 5, 6, 7]
__snake_case : Any = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 269 |
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case = "base_exp.txt" ) -> int:
__lowerCAmelCase : float = 0
__lowerCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) ,__snake_case ) ) ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = list(map(__snake_case ,line.split("," ) ) )
if x * logaa(__snake_case ) > largest:
__lowerCAmelCase : Tuple = x * logaa(__snake_case )
__lowerCAmelCase : Optional[Any] = i + 1
return result
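# The logarithm trick above compares x ** e values without materializing them:
# x1 ** e1 > x2 ** e2  iff  e1 * log10(x1) > e2 * log10(x2), so only the
# exponent-times-log products need to be tracked.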
if __name__ == "__main__":
    print(solution())
| 269 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A : str = logging.get_logger(__name__)
A : str = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str ="""bart"""
__UpperCAmelCase : Optional[Any] =["""past_key_values"""]
__UpperCAmelCase : List[str] ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __a=5_02_65 , __a=10_24 , __a=12 , __a=40_96 , __a=16 , __a=12 , __a=40_96 , __a=16 , __a=0.0 , __a=0.0 , __a="gelu" , __a=10_24 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.0_2 , __a=0.0 , __a=False , __a=True , __a=3 , __a=1 , __a=0 , __a=2 , __a=True , __a=2 , __a=2 , **__a , ):
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = classifier_dropout
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , **__a , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , __a ):
__lowerCAmelCase = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"The config can simply be saved and uploaded again to be fixed." )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def snake_case ( self ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__lowerCAmelCase = {0: "batch"}
__lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
__lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__a , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
for i in range(__a ):
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def snake_case ( self ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = super().outputs
else:
__lowerCAmelCase = super(__a , self ).outputs
if self.use_past:
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
for i in range(__a ):
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def snake_case ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ):
__lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__a , __a , __a , __a , __a )
# Generate decoder inputs
__lowerCAmelCase = seq_length if not self.use_past else 1
__lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__a , __a , __a , __a , __a )
__lowerCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowerCAmelCase = dict(**__a , **__a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["input_ids"].shape
__lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
__lowerCAmelCase , __lowerCAmelCase = self.num_attention_heads
__lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCAmelCase = decoder_seq_length + 3
__lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__a , __a )] , dim=1 )
__lowerCAmelCase = []
                # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
__lowerCAmelCase = min(__a , __a )
__lowerCAmelCase = max(__a , __a ) - min_num_layers
__lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__a ):
common_inputs["past_key_values"].append(
(
torch.zeros(__a ),
torch.zeros(__a ),
torch.zeros(__a ),
torch.zeros(__a ),
) )
# TODO: test this.
__lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__a , __a ):
common_inputs["past_key_values"].append((torch.zeros(__a ), torch.zeros(__a )) )
return common_inputs
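    # Shape convention for the dummy cache built above: each past_key_values
    # entry is a tuple of zero tensors of shape
    # (batch, num_heads, past_seq_len, hidden_size // num_heads); layers covered
    # by both streams get four tensors (self- and cross-attention key/value),
    # the remaining layers get a (key, value) pair.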
def snake_case ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ):
__lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__a , __a , __a , __a , __a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
__lowerCAmelCase , __lowerCAmelCase = self.num_attention_heads
__lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCAmelCase = common_inputs["attention_mask"].dtype
__lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(__a , __a , dtype=__a )] , dim=1 )
__lowerCAmelCase = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(__a )
]
return common_inputs
def snake_case ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowerCAmelCase = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCAmelCase = tokenizer.num_special_tokens_to_add(__a )
__lowerCAmelCase = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCAmelCase = dict(tokenizer(__a , return_tensors=__a ) )
return common_inputs
def snake_case ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
elif self.task == "causal-lm":
__lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
else:
__lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
return common_inputs
def snake_case ( self , __a , __a , __a , __a ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = super()._flatten_past_key_values_(__a , __a , __a , __a )
else:
__lowerCAmelCase = super(__a , self )._flatten_past_key_values_(
__a , __a , __a , __a )
| 259 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 259 | 1 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 209 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
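

# Illustrative usage sketch: inspecting the ONNX export metadata defined above
# (assumes this module is importable as part of the `transformers` package).
if __name__ == "__main__":
    onnx_config = YolosOnnxConfig(YolosConfig())
    print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', ...})])
    print(onnx_config.default_onnx_opset)  # 12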
| 209 | 1 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
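

# Illustrative, hand-worked example: 360 = 2 * 2 * 2 * 3 * 3 * 5.
if __name__ == "__main__":
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]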
| 199 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 199 | 1 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f'Node({self.data})'


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
    print(linked_list)
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')
if __name__ == "__main__":
main()
| 295 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
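

# Illustrative shape check (assumes a CPU run with the default config above and
# that this module is importable within `diffusers`): a VQModel round trip
# preserves the spatial shape of its input.
if __name__ == "__main__":
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    assert model(sample).sample.shape == sample.shape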
| 295 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
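

# Illustrative usage sketch (assumes this module is importable within the
# `datasets` package): the default column mapping of the template.
if __name__ == "__main__":
    template = QuestionAnsweringExtractive()
    assert template.column_mapping == {"question": "question", "context": "context", "answers": "answers"}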
| 363 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best total value using items from `index` onwards."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
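

# Illustrative, hand-worked example: with weights [1, 2, 4, 5], values
# [5, 4, 8, 6] and capacity 5, the best choice is items 0 and 2 (5 + 8 = 13).
if __name__ == "__main__":
    assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13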
| 261 | 0 |
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge^2."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7 * sqrt(5)) / 4 * edge^3."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
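

# Illustrative, hand-checked values for a unit edge:
# surface area = 3 * sqrt(25 + 10 * sqrt(5)) ~ 20.65, volume = (15 + 7 * sqrt(5)) / 4 ~ 7.66.
if __name__ == "__main__":
    assert round(dodecahedron_surface_area(1), 2) == 20.65
    assert round(dodecahedron_volume(1), 2) == 7.66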
| 305 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 188 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 333 | class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make the edge weights distinct (Boruvka's algorithm assumes unique weights)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set (union-find) helper used by Boruvka's algorithm."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Returns the minimum spanning tree of `graph` using Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
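

# Illustrative usage: build a small weighted graph and extract its minimum
# spanning tree (edge weights are made distinct first, as Boruvka assumes).
if __name__ == "__main__":
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)])
    g.distinct_weight()
    print(Graph.boruvka_mst(g))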
| 333 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
| 39 | from math import ceil
def solution(n: int = 1_001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
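

# Illustrative, hand-checked case: for a 5x5 spiral the diagonals sum to
# 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101.
if __name__ == "__main__":
    assert solution(5) == 101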
| 182 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> Any:
snake_case_ = SwiftFormerModelTester(self)
snake_case_ = ConfigTester(
self, config_class=snake_case_, has_text_modality=snake_case_, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, )
def a_ ( self) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(snake_case_)
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_, nn.Linear))
def a_ ( self) -> Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(snake_case_)
snake_case_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], snake_case_)
def a_ ( self) -> Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_)
def a_ ( self) -> Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_)
@slow
def a_ ( self) -> List[str]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SwiftFormerModel.from_pretrained(snake_case_)
self.assertIsNotNone(snake_case_)
@unittest.skip(reason='SwiftFormer does not output attentions')
def a_ ( self) -> Union[str, Any]:
pass
def a_ ( self) -> Optional[Any]:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape, torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]), )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def a_ ( self) -> List[Any]:
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def a_ ( self) -> str:
pass
def UpperCAmelCase ( ) -> str:
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def a_ ( self) -> List[str]:
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None
@slow
def a_ ( self) -> str:
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1_703e00, 2.1_107e00, -2.0_811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
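# --- Illustrative usage (hedged sketch, not part of the test suite) ---
# Mirrors the integration test above outside unittest; assumes the imports at
# the top of this file (torch, ViTImageProcessor, SwiftFormerForImageClassification)
# plus the COCO fixture used by prepare_img().
if __name__ == "__main__":
    processor = ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs')
    classifier = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs')
    with torch.no_grad():
        logits = classifier(**processor(images=prepare_img(), return_tensors='pt')).logits
    print(logits.shape, logits.argmax(-1).item())  # (1, 1000) and the top ImageNet class id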
| 352 | """simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( PretrainedConfig ):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=5_0256, eos_token_id=5_0256, **kwargs, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
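# --- Illustrative usage (hedged sketch, not part of the module) ---
# Shows the attribute_map defined above in action: the generic config names
# resolve to the GPT-style fields. The overridden values are made up.
if __name__ == "__main__":
    config = UpperCamelCase(n_layer=2, n_head=2, n_embd=64)
    assert config.hidden_size == config.n_embd == 64  # mapped via attribute_map
    assert config.num_hidden_layers == config.n_layer == 2
    print(config.model_type)  # "trajectory_transformer"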
| 312 | 0 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def A ( self : Union[str, Any] , resolution : Optional[int]=32 ):
        '''simple docstring'''
        set_seed(0 )
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer
@slow
def A ( self : List[str] ):
'''simple docstring'''
        device = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model , optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model , optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) ) | 282 |
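# --- Aside (hedged): why the two training runs above should match ---
# DDPMScheduler.add_noise and DDIMScheduler.add_noise implement the same
# closed form  x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so with identical betas, clean images, noise and timesteps the two schedulers
# produce identical training batches, and SGD from the same seed takes the same
# steps. A minimal standalone check of that claim:
#
#   import torch
#   from diffusers import DDIMScheduler, DDPMScheduler
#   ddpm, ddim = DDPMScheduler(), DDIMScheduler()
#   x0, eps = torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8)
#   t = torch.tensor([10])
#   assert torch.allclose(ddpm.add_noise(x0, eps, t), ddim.add_noise(x0, eps, t))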
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) ) | 282 | 1 |
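# --- Aside (hedged sketch): the same check with the PyTorch backend ---
# For comparison, the Flax assertions above correspond roughly to:
#
#   import torch
#   from transformers import AutoTokenizer, XLMRobertaModel
#   tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
#   model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
#   inputs = tokenizer('The dog is cute and lives in the garden house', return_tensors='pt')
#   with torch.no_grad():
#       last_hidden = model(**inputs).last_hidden_state  # shape (1, 12, 768)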
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[str]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Dict:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        """simple docstring"""
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ) -> List[Any]:
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        """simple docstring"""
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> List[str]:
        """simple docstring"""
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        """simple docstring"""
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        """simple docstring"""
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""MRA does not output attentions""" )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
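# --- Illustrative usage (hedged sketch, not part of the test suite) ---
# Mirrors the first integration test above: embed a toy 256-token sequence with
# the pretrained 512-token MRA encoder. Requires torch and network access.
if __name__ == "__main__" and is_torch_available():
    model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
    input_ids = torch.arange(256).unsqueeze(0)
    with torch.no_grad():
        hidden = model(input_ids)[0]
    print(hidden.shape)  # torch.Size([1, 256, 768])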
| 331 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1_000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="""`custom_timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=timesteps )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
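# --- Aside (hedged): the "fixed_small" variance checked above ---
# For the linear schedule configured in get_scheduler_config, _get_variance(t)
# is the posterior variance  beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
# which is why it is ~0 at t=0 and approaches beta_end = 0.02 at t=999.
# Recomputing the middle assertion by hand:
if __name__ == "__main__":
    betas = torch.linspace(0.0001, 0.02, 1_000)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    t = 487
    beta_tilde = (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t]) * betas[t]
    print(float(beta_tilde))  # ~0.00979, matching the 1e-5 tolerance above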
| 331 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class __SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self, unet, scheduler )-> Optional[int]:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler )
@torch.no_grad()
    def __call__( self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, )-> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                ''' process.''' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio, t ).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio ).prev_sample
        audio = audio.clamp(-1, 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
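# --- Illustrative usage (hedged sketch) ---
# The pipeline above follows the DanceDiffusion-style unconditional audio API.
# 'harmonai/maestro-150k' is one published checkpoint of this kind; treat the
# name as an assumption if it is unavailable in your environment.
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained('harmonai/maestro-150k')
#   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#   audio = output.audios[0]  # numpy array of shape (channels, samples)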
| 238 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """simple docstring"""
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    """simple docstring"""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __SCREAMING_SNAKE_CASE ( Dataset ):
    '''simple docstring'''
    def __init__( self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", )-> List[Any]:
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self : Dict )-> Optional[int]:
return len(self.src_lens )
    def __getitem__( self, index )-> Dict[str, torch.Tensor]:
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ), index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ), index ).rstrip('''\n''' )
        assert source_line, F'''empty source line for index {index}'''
        assert tgt_line, F'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, '''right''' )
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, '''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
    def get_char_lens( data_file )-> Optional[int]:
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self, batch )-> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks )
        batch = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
logger = getLogger(__name__)
def flatten_list( lst: List[List] ):
    """simple docstring"""
    return list(itertools.chain.from_iterable(lst ) )
def save_git_info( folder_path: str ):
    """simple docstring"""
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    """simple docstring"""
    with open(path , '''w''' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    """simple docstring"""
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f: Callable , x: Iterable ):
    """simple docstring"""
    return list(map(f , x ) )
def pickle_save( obj , path ):
    """simple docstring"""
    with open(path , '''wb''' ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s: str ):
    """simple docstring"""
    def remove_articles(text ):
        return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
    """simple docstring"""
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score( prediction , ground_truth ):
    """simple docstring"""
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns: List[str] , reference_lns: List[str] ):
    """simple docstring"""
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix: str ):
    """simple docstring"""
    return model_prefix.startswith('''rag''' )
def set_extra_model_params( extra_params , hparams , config ):
    """simple docstring"""
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
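# --- Illustrative usage (hedged, not part of the training utilities) ---
# Quick sanity check of the SQuAD-style metrics defined above.
if __name__ == "__main__":
    assert normalize_answer("The Quick, Brown Fox!") == "quick brown fox"
    print(f1_score("green cat sat", "cat sat"))  # precision 2/3, recall 1 -> F1 = 0.8
    print(calculate_exact_match(["hello there"], ["Hello There!"]))  # {'em': 1.0}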
| 238 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class TaTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self ,vocab_file ,eos_token="</s>" ,unk_token="<unk>" ,pad_token="<pad>" ,extra_ids=1_0_0 ,additional_special_tokens=None ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,legacy=True ,**kwargs ,):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool('''extra_id''' in str(x ) ) ,additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        if legacy:
            logger.warning_once(
                F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
                ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,extra_ids=extra_ids ,additional_special_tokens=additional_special_tokens ,sp_model_kwargs=self.sp_model_kwargs ,legacy=legacy ,**kwargs ,)
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path ,max_model_length ,init_max_model_length ):
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    '''This tokenizer was incorrectly instantiated with a model max length of'''
                    F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
                    ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
                    F""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' ,FutureWarning ,)
        return max_model_length
@property
    def vocab_size( self ):
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda token: bool(re.search(r'''<extra_id_\d+>''' ,token ) ) is not None ,self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self ,token_ids: List[int] ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                ''' eos tokens being added.''' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self ,d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self ,text: "TextInput" ,**kwargs ):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE ,''' ''' )
        return super().tokenize(text ,**kwargs )
    def _tokenize( self ,text ,**kwargs ):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text ,out_type=str )
        if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id( self ,token ):
        if token.startswith('''<extra_id_''' ):
            match = re.match(r'''<extra_id_(\d+)>''' ,token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self ,index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = F"""<extra_id_{self.vocab_size - 1 - index}>"""
        return token
    def convert_tokens_to_string( self ,tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
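# --- Illustrative usage (hedged sketch) ---
# With extra_ids=100 (the default above), the last 100 vocabulary ids are the
# sentinel tokens <extra_id_0> ... <extra_id_99>, and <extra_id_0> maps to
# vocab_size - 1; loading requires network access or a local spiece.model.
#
#   tok = TaTokenizer.from_pretrained('t5-small')
#   assert tok._convert_token_to_id('<extra_id_0>') == tok.vocab_size - 1
#   assert tok._convert_id_to_token(tok.vocab_size - 1) == '<extra_id_0>'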
| 44 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 44 | 1 |
"""Check whether a string can be rearranged into a palindrome, two ways."""
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A palindrome permutation exists iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check without Counter: build the frequency table by hand."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark both implementations with timeit."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()", "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()", "\tans =",
        can_string_be_rearranged_as_palindrome(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
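# Quick sanity sketch of the two implementations (illustrative strings, not part
# of the original module; both functions agree on these):
#     can_string_be_rearranged_as_palindrome_counter("Momo")   -> True  ("ommo")
#     can_string_be_rearranged_as_palindrome("Mother")         -> False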
| 324 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
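
# get_results() expects the flat JSON that the example scripts write to
# all_results.json, e.g. (illustrative): {"eval_accuracy": 0.80, "train_loss": 0.45}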
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 324 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        # Note: the boolean flags below (use_linear_projection, clip_sample,
        # set_alpha_to_one/zero) were garbled in the source and are restored to
        # the conventional fast-test values.
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        # restored dict-key assignments (the obfuscated source dropped the targets)
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        # restored attribute assignments (the obfuscated source dropped the targets)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        # restored attribute assignments (the obfuscated source dropped the targets)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
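
# The DiffEdit flow exercised above, in brief (argument names follow the
# pipeline API shown in the tests; values are illustrative):
#     mask = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
#     latents = pipe.invert(prompt=src, image=img, inpaint_strength=0.7).latents
#     edited = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]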
| 61 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
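
# Lazy-import sketch (illustrative): with _LazyModule installed in sys.modules,
# importing the config symbol stays cheap, while the first access to a
# torch-backed symbol triggers the real submodule import:
#     from transformers.models.xmod import XmodConfig   # no torch import yet
#     from transformers.models.xmod import XmodModel    # imports modeling_xmod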
| 61 | 1 |
class TrieNode:
    """A node of a trie; the root represents the empty prefix."""

    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def test() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
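
# Usage sketch (mirrors test_trie above; values are illustrative):
#     trie = TrieNode()
#     trie.insert_many(["band", "bandana"])
#     trie.find("band")      # -> True
#     trie.delete("band")
#     trie.find("bandana")   # -> True: the longer word survives the deletion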
| 111 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    # The original names of these skipped overrides are not recoverable from the
    # obfuscated source; descriptive placeholder names are used instead.
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
__snake_case: Dict = self.get_tokenizer()
# Testing tokenization
__snake_case: List[str] = """こんにちは、世界。 こんばんは、㔺界。"""
__snake_case: Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__snake_case: Any = tokenizer.tokenize(A )
self.assertListEqual(A , A )
# Testing conversion to ids without special tokens
__snake_case: str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__snake_case: str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , A )
# Testing conversion to ids with special tokens
__snake_case: Optional[int] = tokens + [tokenizer.unk_token]
__snake_case: List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__snake_case: str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , A )
    def test_token_bagging(self):
__snake_case: Union[str, Any] = self.get_tokenizer()
# Testing tokenization
__snake_case: Optional[Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__snake_case: Optional[Any] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__snake_case: Union[str, Any] = tokenizer.encode(A )
__snake_case: List[Any] = tokenizer.decode(A )
self.assertEqual(A , A )
@slow
    def test_prefix_input(self):
__snake_case: Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__snake_case: Any = """こんにちは、世界。"""
__snake_case: Tuple = """こんばんは、㔺界。😀"""
__snake_case: Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__snake_case: int = tokenizer.encode(prefix_text + input_text )
__snake_case: Union[str, Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__snake_case: Tuple = tokenizer.encode(A , prefix_text=A )
__snake_case: Union[str, Any] = tokenizer.decode(A )
__snake_case: Dict = tokenizer.decode(A )
__snake_case: Optional[Any] = tokenizer.decode(A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
@slow
    def test_token_type_ids(self):
__snake_case: int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__snake_case: Optional[int] = """こんにちは、世界。"""
__snake_case: Any = """こんばんは、㔺界。😀"""
__snake_case: Optional[int] = len(tokenizer.encode(A ) ) - 2
__snake_case: str = len(tokenizer.encode(A ) ) - 2
__snake_case: Dict = [1] + [0] * (len_prefix + len_text + 1)
__snake_case: str = [1] * (len_prefix + len_text + 1) + [0]
__snake_case: List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__snake_case: int = tokenizer(prefix_text + input_text ).token_type_ids
__snake_case: Optional[int] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__snake_case: Tuple = tokenizer(A , prefix_text=A ).token_type_ids
self.assertListEqual(A , A )
self.assertListEqual(A , A )
self.assertListEqual(A , A )
@slow
    def test_prefix_tokens(self):
__snake_case: Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__snake_case: int = tokenizer.encode("""あンいワ""" )
__snake_case: Optional[int] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__snake_case: List[Any] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(A ) , tokenizer.decode(A ) )
self.assertEqual(tokenizer.decode(A ) , tokenizer.decode(A ) )
self.assertNotEqual(A , A )
self.assertNotEqual(A , A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
    def test_batch_encode(self):
__snake_case: List[str] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__snake_case: Union[str, Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__snake_case: Optional[int] = tokenizer(A , padding=A )
__snake_case: int = tokenizer.batch_encode_plus(A , padding=A )
# fmt: off
__snake_case: List[str] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
__snake_case: int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__snake_case: Dict = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , A )
self.assertListEqual(x_token.token_type_ids , A )
self.assertListEqual(x_token.attention_mask , A )
self.assertListEqual(x_token_a.input_ids , A )
self.assertListEqual(x_token_a.token_type_ids , A )
self.assertListEqual(x_token_a.attention_mask , A )
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 111 | 1 |
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reconstructed target for the orphaned "3" constant; silences TF logging
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 358 |
"""Sum all node values of a binary tree via depth-first search."""
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
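
# Usage sketch (illustrative tree: root 10 with children 5 and -3):
#     tree = Node(10); tree.left = Node(5); tree.right = Node(-3)
#     next(iter(BinaryTreeNodeSum(tree)))  # -> 12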
| 246 | 0 |
"""UperNet model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 112 |
"""Bead sort (gravity sort); only works for sequences of non-negative integers."""


def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the "beads" fall: move the excess down one rod
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 112 | 1 |
"""Generate a large probable prime using the Rabin–Miller primality test."""
import random


def rabin_miller(num: int) -> bool:
    # write num - 1 as 2**t * s with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37,
        41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89,
        97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
        367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433,
        439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593,
        599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
        661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827,
        829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
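
# Usage sketch (illustrative): two ~512-bit probable primes give an RSA-style
# ~1024-bit modulus. rabin_miller() is probabilistic (5 random rounds).
#     p = generate_large_prime(512)
#     q = generate_large_prime(512)
#     n = p * q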
| 354 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
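
# Usage sketch (class name restored above is an assumption; shape follows the
# 224x224 defaults):
#     processor = CLIPImageProcessor()
#     batch = processor(images=[pil_image], return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])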
| 80 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
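
# Worked example from the parametrized cases above: 10 shards over at most 3
# jobs become the contiguous ranges range(0, 4), range(4, 7), range(7, 10).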
| 22 | 0 |
"""simple docstring"""
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row of the current board (possible_board) to place a queen in
    row = len(possible_board)

    # If row equals the size of the board, every row of the current board
    # (possible_board) already holds a queen, so we have a complete solution
    if row == n:
        # Convert possible_board, which looks like [1, 3, 0, 2], into
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # Iterate over each column of the row to find every placement that works
    for col in range(n):
        # First check that the current board (possible_board) does not already
        # contain this column value; a repeat would mean a vertical collision.
        # Then apply the two diagonal formulas:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b
        #
        # and verify that neither result already appears in its collision list
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks hits, there is a collision, so we continue to
        # the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise recurse with the updated board and collision lists
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
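
# For n = 4 the search finds the two classic solutions, built from the column
# lists [1, 3, 0, 2] and [2, 0, 3, 1]:
#     . Q . .      . . Q .
#     . . . Q      Q . . .
#     Q . . .      . . . Q
#     . . Q .      . Q . .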
| 362 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 27 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 1 |
def solution(n: int = 100) -> int:
    """Count the distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
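
# Sanity check from the Project Euler 29 statement: for n = 5 there are
# exactly 15 distinct terms a**b with 2 <= a, b <= 5.
assert solution(5) == 15
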
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 169 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
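
# Usage sketch (illustrative): build a config and inspect the dynamic ONNX
# axes declared above.
#
#     config = Data2VecVisionConfig(image_size=224, patch_size=16)
#     onnx_config = Data2VecVisionOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict mapping "pixel_values" to its axes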
| 281 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
    def test_added_tokens_do_lower_case(self):
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
    def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
    def test_pretokenized_inputs(self):
pass
| 281 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list of summary lines."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 59 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 59 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
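
# Example (values rounded): the SiLU is x * sigmoid(x) applied element-wise, e.g.
#
#     >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
#     array([-0.26894142,  0.73105858,  1.76159416])
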
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
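
# Example: estimating the integral of x**2 over [0, 1] (true value 1/3).
# Monte Carlo estimates converge at the usual O(1/sqrt(N)) rate, so with
# 100_000 samples the result is typically within a few thousandths of 0.333...
#
#     area_under_curve_estimator(100_000, lambda x: x * x)
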
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 113 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
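
# The assignment above swaps this module for a `_LazyModule`, which defers the
# heavy torch imports until an attribute is first accessed. A stripped-down
# sketch of the same idea (illustrative, not the actual implementation):
#
#     import importlib
#
#     def __getattr__(name):  # PEP 562: module-level __getattr__
#         submodule = _name_to_submodule[name]  # e.g. "modeling_vit_msn"
#         module = importlib.import_module("." + submodule, __name__)
#         return getattr(module, name)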
| 90 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
 | 312 | 0 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Find the maximum product a*b*c of a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
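
# Sanity check: for a perimeter of 12 the only Pythagorean triplet is
# (3, 4, 5), giving a product of 60. (The Project Euler #9 case, n = 1000,
# yields 31875000.)
assert solution(12) == 60
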
if __name__ == "__main__":
print(F"""{solution() = }""") | 371 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class DeiTImageProcessor(BaseImageProcessor):
    # NOTE: the class name was lost in this copy; the defaults (resize to 256,
    # center crop to 224, BICUBIC, ImageNet standard mean/std) match the
    # DeiT-style image processor, so that name is assumed here.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
 | 64 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
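
# Usage sketch (illustrative): 4-bit NF4 quantization with bfloat16 compute,
# passed to `from_pretrained` via `quantization_config`.
#
#     quantization_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_compute_dtype=torch.bfloat16,
#     )
#     model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)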
| 217 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 217 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 229 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50_358, hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4_096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 229 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
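
# Usage sketch (illustrative) for the classes defined below: build a weighted
# triangle and extract its minimum spanning tree. Kruskal keeps the two
# cheapest edges, (1, 2, weight 1) and (2, 3, weight 2).
#
#     g = GraphUndirectedWeighted[int]()
#     g.add_edge(1, 2, 1)
#     g.add_edge(2, 3, 2)
#     g.add_edge(1, 3, 3)
#     mst = g.kruskal()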
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent pointer and rank.
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure.
    def __init__(self) -> None:
        # map from node data to the corresponding tree node
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing `data`
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set `data` belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # union by rank: attach the lower-rank root under the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbours and edge weights
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight and add them greedily,
        # using the disjoint set to skip edges that would form a cycle.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
 | 331 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
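
# Sanity check: R(6) = 111111 = 3 * 7 * 11 * 13 * 37 is the first repunit
# divisible by 7, so A(7) = 6.
assert least_divisible_repunit(7) == 6
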
if __name__ == "__main__":
print(F'''{solution() = }''')
| 246 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
 | 360 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 249 | 0 |
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a decimal integer to its hexadecimal string representation, e.g. 26 -> "0x1a"."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
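
# Cross-check against Python's built-in formatting: for non-negative integers,
# decimal_to_hexadecimal(n) should agree with hex(n).
assert decimal_to_hexadecimal(26) == hex(26) == "0x1a"
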
if __name__ == "__main__":
import doctest
doctest.testmod()
| 156 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 156 | 1 |
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 14 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config) -> None:
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
                config.hidden_sizes[i] :
            ]
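# Shape intuition (assumed sizes for illustration, not values from a real model): if
# hidden_sizes[i] == 32, the fused kv.weight has shape (64, 32); rows [:32] become the
# key projection and rows [32:] the value projection, matching the key-then-value order.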
def prepare_img() -> Image.Image:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    if "segformer" in model_name:
        size = model_name[len('segformer.') : len('segformer.') + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = 'ade20k-id2label.json'
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = 'cityscapes-id2label.json'
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f'Model {model_name} not supported')
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1_000
        filename = 'imagenet-1k-id2label.json'
        expected_shape = (1, 1_000)
    else:
        raise ValueError(f'Model {model_name} not supported')
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f'Size {size} not supported')
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info(f'Converting model {model_name}...')
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))['state_dict']
    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
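    # Example invocation (hypothetical file names and paths, shown for illustration):
    #   python convert_segformer_checkpoint.py --model_name segformer.b0.512x512.ade.160k \
    #       --checkpoint_path ./segformer_b0.pth --pytorch_dump_folder_path ./segformer_b0_hf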
| 14 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
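# Minimal usage sketch (illustrative, assumes a configured Accelerate environment):
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased")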
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
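# Note on the truncation above (illustrative): when the evaluation set does not split
# evenly across processes, the distributed sampler pads the final batch by repeating
# samples, so the gathered tensors contain duplicates; slicing back to
# len(eval_dataloader.dataset) ensures the metric counts each sample exactly once.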
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f'state_{starting_epoch-1}.json'), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f'epoch {epoch}:', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f'state_{epoch}.json'), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 252 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
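    # Illustrative note (not in the original file): with this lazy pattern, importing
    # the package stays cheap; _LazyModule only loads modeling_swiftformer (and thus
    # torch) the first time a name from _import_structure is actually accessed.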
| 252 | 1 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = DebertaVaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
lowerCAmelCase__ = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
lowerCAmelCase__ = model(__UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = DebertaVaForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = DebertaVaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = DebertaVaForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = DebertaVaForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = DebertaVaForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__UpperCamelCase )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = DebertaVaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason='Model not available yet' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
lowerCAmelCase__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
lowerCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
# compare the actual values for a slice.
lowerCAmelCase__ = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
| 360 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
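    # Illustrative check (numbers assumed, not from the original test): for an input
    # with w=600, h=400 and shortest_edge=18, the branch above yields
    # expected_height=18 and expected_width=int(18 * 600 / 400)=27.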
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_rescale' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_pad' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'size' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
lowerCAmelCase__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCamelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
# prepare image and target
lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCAmelCase__ = json.loads(f.read() )
lowerCAmelCase__ = {'image_id': 3_97_69, 'annotations': target}
# encode them
lowerCAmelCase__ = DeformableDetrImageProcessor()
lowerCAmelCase__ = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors='pt' )
# verify pixel values
lowerCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase )
lowerCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) )
# verify boxes
lowerCAmelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase )
lowerCAmelCase__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) )
# verify is_crowd
lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) )
# verify class_labels
lowerCAmelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) )
# verify orig_size
lowerCAmelCase__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) )
# verify size
lowerCAmelCase__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
# prepare image, target and masks_path
lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCAmelCase__ = json.loads(f.read() )
lowerCAmelCase__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
lowerCAmelCase__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCAmelCase__ = DeformableDetrImageProcessor(format='coco_panoptic' )
lowerCAmelCase__ = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors='pt' )
# verify pixel values
lowerCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase )
lowerCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) )
# verify boxes
lowerCAmelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase )
lowerCAmelCase__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) )
# verify is_crowd
lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) )
# verify class_labels
lowerCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) )
# verify masks
lowerCAmelCase__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCamelCase )
# verify orig_size
lowerCAmelCase__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) )
# verify size
lowerCAmelCase__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) )
| 122 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 327 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
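        # Worked example with this tester's defaults (illustrative): image_size=30 and
        # patch_size=2 give num_patches = (30 // 2) ** 2 = 225, so with mask_ratio=0.6
        # the expected sequence length is ceil(0.4 * (225 + 1)) = 91 visible tokens.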
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[str] = model_class(_A )
snake_case_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Dict = [*signature.parameters.keys()]
snake_case_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
"""simple docstring"""
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A )
snake_case_ : List[str] = model(_A , noise=_A )
snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
snake_case_ : str = model(**_A , noise=_A )
snake_case_ : Union[str, Any] = outputs_dict[0].numpy()
snake_case_ : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_A : int ):
snake_case_ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_A ):
snake_case_ : str = v.numpy()
else:
snake_case_ : Optional[Any] = np.array(_A )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ : int = model_class(_A )
snake_case_ : List[Any] = self._prepare_for_class(_A , _A )
snake_case_ : Any = prepare_numpy_arrays(_A )
snake_case_ : List[Any] = model(_A , noise=_A )
snake_case_ : List[Any] = model(**_A , noise=_A )
self.assert_outputs_same(_A , _A )
def check_pt_tf_models( self , tf_model , pt_model , tf_inputs_dict ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
tf_noise = tf.constant(noise )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
tf_inputs_dict['noise'] = tf_noise
super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(module )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(module , module_member_name ),)
if isinstance(module_member , type )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member , '_keras_serializable' , False )
}
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
noise = tf.convert_to_tensor(noise )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
main_layer = main_layer_class(config )
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs , outputs=main_layer(symbolic_inputs ) )
outputs = model(inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname , 'keras_model.h5' )
model.save(filepath )
model = tf.keras.models.load_model(
filepath , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(model , tf.keras.Model )
after_outputs = model(inputs_dict )
self.assert_outputs_same(after_outputs , outputs )
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
model = model_class(config )
inputs = self._prepare_for_class(inputs_dict , model_class )
outputs = model(inputs , noise=noise )
if model_class.__name__ == "TFViTMAEModel":
out_before = outputs.last_hidden_state.numpy()
out_before[np.isnan(out_before )] = 0
else:
out_before = outputs.logits.numpy()
out_before[np.isnan(out_before )] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname , saved_model=False )
model = model_class.from_pretrained(tmpdirname )
after_outputs = model(inputs , noise=noise )
if model_class.__name__ == "TFViTMAEModel":
out_after = after_outputs['last_hidden_state'].numpy()
out_after[np.isnan(out_after )] = 0
else:
out_after = after_outputs['logits'].numpy()
out_after[np.isnan(out_after )] = 0
# maximum absolute difference between outputs before and after the save/load round-trip
max_diff = np.amax(np.abs(out_before - out_after ) )
self.assertLessEqual(max_diff , 1E-5 )
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
np.random.seed(2 )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
model = model_class(config )
inputs = self._prepare_for_class(inputs_dict , model_class )
outputs = model(inputs , noise=noise )
model_config = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(model_config )
new_model = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
new_model = model_class.from_config(model.config )
new_model(inputs ) # Build model
new_model.set_weights(model.get_weights() )
after_outputs = new_model(inputs , noise=noise )
self.assert_outputs_same(after_outputs , outputs )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
model = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(model )
def prepare_img( ):
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def default_image_processor( self ) -> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
model = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
vit_mae_config = ViTMAEConfig()
num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
noise = np.random.uniform(size=(1, num_patches) )
# forward pass
outputs = model(**inputs , noise=noise )
# verify the logits
expected_shape = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1E-4 )
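# ---------------------------------------------------------------------------
# A minimal sketch (not part of the test suite) of the noise trick exercised
# above: ViTMAE masks a random subset of patches on every forward pass, so the
# tests pass an explicit `noise` tensor to make the masking deterministic.
# The sizes below (224px image, 16px patches, batch of 1) are illustrative assumptions.
if __name__ == "__main__":
    demo_num_patches = (224 // 16) ** 2  # 196 patches
    demo_noise = np.random.uniform(size=(1, demo_num_patches))  # one score per patch
    # Passing the same `demo_noise` to two forward calls of the same model,
    # e.g. model(inputs, noise=demo_noise), yields identical masks and outputs.
    print(demo_noise.shape)  # (1, 196)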
| 327 | 1 |
'''simple docstring'''
from __future__ import annotations
def make_matrix( row_size : int = 4 ):
'''simple docstring'''
row_size = abs(row_size ) or 4
return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90( matrix : list[list[int]] ):
'''simple docstring'''
return reverse_row(transpose(matrix ) )
# OR.. transpose(reverse_column(matrix))
def rotate_180( matrix : list[list[int]] ):
'''simple docstring'''
return reverse_row(reverse_column(matrix ) )
# OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix : list[list[int]] ):
'''simple docstring'''
return reverse_column(transpose(matrix ) )
# OR.. transpose(reverse_row(matrix))
def transpose( matrix : list[list[int]] ):
'''simple docstring'''
matrix = [list(x ) for x in zip(*matrix )]
return matrix
def reverse_row( matrix : list[list[int]] ):
'''simple docstring'''
matrix = matrix[::-1]
return matrix
def reverse_column( matrix : list[list[int]] ):
'''simple docstring'''
matrix = [x[::-1] for x in matrix]
return matrix
def print_matrix( matrix : list[list[int]] ):
'''simple docstring'''
for row in matrix:
print(*row )
if __name__ == "__main__":
matrix = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_90(matrix))
matrix = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_180(matrix))
matrix = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_270(matrix))
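# Worked example of the identities above on a 2x2 matrix: transpose([[1, 2], [3, 4]])
# gives [[1, 3], [2, 4]], and reversing the row order then yields the
# 90-degree counterclockwise rotation.
assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]
assert rotate_180([[1, 2], [3, 4]]) == [[4, 3], [2, 1]]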
| 242 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt( prompt_or_repo_id , agent_name , mode="run" ):
'''simple docstring'''
if prompt_or_repo_id is None:
prompt_or_repo_id = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , prompt_or_repo_id ) is not None:
return prompt_or_repo_id
prompt_file = cached_file(
prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(prompt_file , 'r' , encoding='utf-8' ) as f:
return f.read()
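# A minimal usage sketch, assuming network access to the default prompts repo;
# "my-agent" is a hypothetical agent name, used only for the download user-agent.
if __name__ == "__main__":
    run_template = download_prompt(None, "my-agent", mode="run")
    print(run_template[:200])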
| 242 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableDiffusionPanoramaPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ) -> List[Any]:
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
scheduler = DDIMScheduler()
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ) -> List[str]:
generator = torch.manual_seed(seed )
inputs = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> int:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = "french fries"
lowerCamelCase_ = sd_pipe(**lowercase , negative_prompt=lowercase )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase , view_batch_size=2 )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , skip_prk_steps=lowercase )
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , seed=0 ) -> Any:
generator = torch.manual_seed(seed )
inputs = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = "stabilityai/stable-diffusion-2-base"
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase , subfolder="scheduler" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowerCamelCase_ = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=None )
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
expected_slice = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE_( self ) -> Any:
number_of_steps = 0
def callback_fn(step , timestep , latents ) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
callback_fn.has_been_called = False
model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder="scheduler" )
pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = "stabilityai/stable-diffusion-2-base"
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase , subfolder="scheduler" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
lowerCamelCase_ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**lowercase )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
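# ---------------------------------------------------------------------------
# A minimal end-to-end sketch of the pipeline exercised above, assuming a CUDA
# device and network access to the checkpoint (not part of the test suite):
if __name__ == "__main__":
    demo_ckpt = "stabilityai/stable-diffusion-2-base"
    demo_scheduler = DDIMScheduler.from_pretrained(demo_ckpt, subfolder="scheduler")
    demo_pipe = StableDiffusionPanoramaPipeline.from_pretrained(demo_ckpt, scheduler=demo_scheduler).to("cuda")
    # Panorama generation denoises overlapping views and fuses them into one wide image.
    demo_image = demo_pipe("a photo of the dolomites", num_inference_steps=50).images[0]
    demo_image.save("panorama.png")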
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting ( DiffusionPipeline ):
def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
deprecation_message = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
deprecation_message = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , prompt , image , text , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
# use CLIPSeg to compute a segmentation mask for the region described by `text`
inputs = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
outputs = self.segmentation_model(**inputs )
mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
inpainting_pipeline = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
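# A minimal usage sketch, assuming GPU + network access. This mirrors how
# diffusers community pipelines are typically loaded via `custom_pipeline`;
# checkpoint and file names below are illustrative assumptions.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
    clipseg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    clipseg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",
        custom_pipeline="text_inpainting",
        segmentation_model=clipseg_model,
        segmentation_processor=clipseg_processor,
    ).to("cuda")
    init_image = Image.open("room.png").convert("RGB").resize((512, 512))  # hypothetical input
    result = pipe(prompt="a red velvet couch", image=init_image, text="the couch")
    result.images[0].save("inpainted.png")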
| 19 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name : str ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
module_name = model_type_to_module_name(module_name )
module = importlib.import_module(f'.{module_name}' , 'transformers.models' )
try:
return getattr(module , class_name )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(extractor , '__name__' , None ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module('transformers' )
if hasattr(main_module , class_name ):
return getattr(main_module , class_name )
return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(resolved_config_file , encoding='utf-8' ) as reader:
return json.load(reader )
class AutoImageProcessor :
def __init__( self ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
config = kwargs.pop('config' , None )
trust_remote_code = kwargs.pop('trust_remote_code' , None )
kwargs['_from_auto'] = True
config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
image_processor_class = config_dict.get('image_processor_type' , None )
image_processor_auto_map = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
feature_extractor_class = config_dict.pop('feature_extractor_type' , None )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
image_processor_class = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(config , PretrainedConfig ):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
# It could be in `config.image_processor_type``
image_processor_class = getattr(config , 'image_processor_type' , None )
if hasattr(config , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
image_processor_auto_map = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
image_processor_class = image_processor_class_from_name(image_processor_class )
has_remote_code = image_processor_auto_map is not None
has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
trust_remote_code = resolve_trust_remote_code(
trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
if has_remote_code and trust_remote_code:
image_processor_class = get_class_from_dynamic_module(
image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
_ = kwargs.pop('code_revision' , None )
if os.path.isdir(pretrained_model_name_or_path ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(config_dict , **kwargs )
elif image_processor_class is not None:
return image_processor_class.from_dict(config_dict , **kwargs )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(a__ ) in IMAGE_PROCESSOR_MAPPING:
image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
return image_processor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def register( config_class , image_processor_class ):
IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
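# A minimal usage sketch of the class above (the checkpoint is a common public
# one, used here as an assumption and requiring network access):
if __name__ == "__main__":
    demo_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
    print(type(demo_processor).__name__)  # ViTImageProcessor, per the "vit" entry in the mapping above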
| 80 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution( limit : int = 1_500_000 )-> int:
frequencies = defaultdict(int )
euclid_m = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
if gcd(euclid_m , euclid_n ) > 1:
continue
primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'{solution() = }')
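# Worked example of Euclid's parametrization used above: m=2, n=1 yields the
# primitive triple (3, 4, 5) with perimeter 2*m*(m+n) = 2*2*3 = 12; every
# multiple of 12 up to the limit is then counted once for this triangle, and
# the final sum keeps only perimeters realized by exactly one right triangle.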
| 80 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = '''encoder-decoder'''
is_composition = True
def __init__( self , **kwargs ):
'''simple docstring'''
super().__init__(**kwargs )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
encoder_config = kwargs.pop("""encoder""" )
encoder_model_type = encoder_config.pop("""model_type""" )
decoder_config = kwargs.pop("""decoder""" )
decoder_model_type = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
self.is_encoder_decoder = True
@classmethod
def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ):
'''simple docstring'''
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
def to_dict( self ):
'''simple docstring'''
output = copy.deepcopy(self.__dict__ )
output["""encoder"""] = self.encoder.to_dict()
output["""decoder"""] = self.decoder.to_dict()
output["""model_type"""] = self.__class__.model_type
return output
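# A minimal usage sketch (uses standard transformers config classes; the tiny
# sizes are illustrative and assume a full transformers install):
if __name__ == "__main__":
    from transformers import BertConfig, GPT2Config
    encoder_cfg = BertConfig(hidden_size=128, num_hidden_layers=2)
    decoder_cfg = GPT2Config(n_embd=128, n_layer=2)
    cfg = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
    print(cfg.decoder.is_decoder, cfg.decoder.add_cross_attention)  # True True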
| 323 |
'''simple docstring'''
def set_bit( number : int , position : int ) -> int:
"""simple docstring"""
return number | (1 << position)
def clear_bit( number : int , position : int ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def flip_bit( number : int , position : int ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def is_bit_set( number : int , position : int ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def get_bit( number : int , position : int ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
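# Worked examples on the 4-bit value 0b1010 (decimal 10):
assert set_bit(0b1010, 0) == 0b1011  # turn bit 0 on
assert clear_bit(0b1010, 1) == 0b1000  # turn bit 1 off
assert flip_bit(0b1010, 3) == 0b0010  # toggle bit 3
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 0) == 0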
| 323 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , *, clip_extra_context_tokens = 4 , clip_embeddings_dim = 768 , time_embed_dim , cross_attention_dim , ):
super().__init__()
self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
# parameters for additional clip time embeddings
self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
# parameters for encoder hidden states
self.clip_extra_context_tokens = clip_extra_context_tokens
self.clip_extra_context_tokens_proj = nn.Linear(
clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
image_embeddings_batch_size = image_embeddings.shape[0]
classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
image_embeddings_batch_size , -1 )
image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
batch_size = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
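# A minimal shape sketch of the projection module above (dimension values are
# illustrative assumptions; random tensors stand in for real CLIP outputs):
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4,
        clip_embeddings_dim=768,
        time_embed_dim=1536,
        cross_attention_dim=1280,
    )
    demo_image_embeddings = torch.randn(2, 768)
    demo_prompt_embeds = torch.randn(2, 768)
    demo_text_states = torch.randn(2, 77, 768)
    hidden_states, time_embeddings = proj(
        image_embeddings=demo_image_embeddings,
        prompt_embeds=demo_prompt_embeds,
        text_encoder_hidden_states=demo_text_states,
        do_classifier_free_guidance=False,
    )
    # 4 extra context tokens are prepended to the 77 text tokens
    print(hidden_states.shape, time_embeddings.shape)  # (2, 81, 1280) (2, 1536)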
| 87 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule ( nn.Module ):
def __init__( self , in_channels , out_channels , kernel_size , padding = 0 , bias = False , dilation = 1 , ):
super().__init__()
self.conv = nn.Conv2d(
in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
self.batch_norm = nn.BatchNorm2d(out_channels )
self.activation = nn.ReLU()
def forward( self , input ):
output = self.conv(input )
output = self.batch_norm(output )
output = self.activation(output )
return output
class UperNetPyramidPoolingBlock ( nn.Module ):
def __init__( self , pool_scale , in_channels , channels ):
super().__init__()
self.layers = [
nn.AdaptiveAvgPool2d(pool_scale ),
UperNetConvModule(in_channels , channels , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(i ) , layer )
def forward( self , input ):
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state )
return hidden_state
class UperNetPyramidPoolingModule ( nn.Module ):
def __init__( self , pool_scales , in_channels , channels , align_corners ):
super().__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.blocks = []
for i, pool_scale in enumerate(pool_scales ):
block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
self.blocks.append(block )
self.add_module(str(i ) , block )
def forward( self , x ):
ppm_outs = []
for ppm in self.blocks:
ppm_out = ppm(x )
upsampled_ppm_out = nn.functional.interpolate(
ppm_out , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(upsampled_ppm_out )
return ppm_outs
class UperNetHead ( nn.Module ):
def __init__( self , config , in_channels ):
super().__init__()
self.config = config
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
self.in_channels = in_channels
self.channels = config.hidden_size
self.align_corners = False
self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
self.psp_modules = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
self.bottleneck = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(l_conv )
self.fpn_convs.append(fpn_conv )
self.fpn_bottleneck = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def init_weights( self ):
self.apply(self._init_weights )
def _init_weights( self , module ):
if isinstance(module , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def psp_forward( self , inputs ):
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x ) )
psp_outs = torch.cat(psp_outs , dim=1 )
output = self.bottleneck(psp_outs )
return output
def forward( self , encoder_hidden_states ):
# build laterals
laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(encoder_hidden_states ) )
# build top-down path
used_backbone_levels = len(laterals )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=prev_shape , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
fpn_outs[i] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
fpn_outs = torch.cat(fpn_outs , dim=1 )
output = self.fpn_bottleneck(fpn_outs )
output = self.classifier(output )
return output
class UperNetFCNHead ( nn.Module ):
def __init__( self , config , in_index = 2 , kernel_size = 3 , dilation = 1 ):
super().__init__()
self.config = config
self.in_channels = config.auxiliary_in_channels
self.channels = config.auxiliary_channels
self.num_convs = config.auxiliary_num_convs
self.concat_input = config.auxiliary_concat_input
self.in_index = in_index
conv_padding = (kernel_size // 2) * dilation
convs = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
if self.num_convs == 0:
self.convs = nn.Identity()
else:
self.convs = nn.Sequential(*convs )
if self.concat_input:
self.conv_cat = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def init_weights( self ):
self.apply(self._init_weights )
def _init_weights( self , module ):
if isinstance(module , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def forward( self , encoder_hidden_states ):
# just take the relevant feature maps
hidden_states = encoder_hidden_states[self.in_index]
output = self.convs(hidden_states )
if self.concat_input:
output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
output = self.classifier(output )
return output
class UperNetPreTrainedModel ( PreTrainedModel ):
config_class = UperNetConfig
main_input_name = """pixel_values"""
supports_gradient_checkpointing = True
def _init_weights( self , module ):
if isinstance(module , UperNetPreTrainedModel ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def init_weights( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _set_gradient_checkpointing( self , module , value=False ):
if isinstance(module , BackboneMixin ):
module.gradient_checkpointing = value
UpperCamelCase__ = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCamelCase__ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , _a , )
class UperNetForSemanticSegmentation ( UperNetPreTrainedModel ):
def __init__( self , config ):
super().__init__(config )
self.backbone = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
def forward( self , pixel_values = None , output_attentions = None , output_hidden_states = None , labels = None , return_dict = None , ):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
outputs = self.backbone.forward_with_filtered_kwargs(
pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
features = outputs.feature_maps
logits = self.decode_head(features )
logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=False )
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(features )
auxiliary_logits = nn.functional.interpolate(
auxiliary_logits , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=False )
loss = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
main_loss = loss_fct(logits , labels )
auxiliary_loss = loss_fct(auxiliary_logits , labels )
loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
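# A minimal usage sketch of the model above, assuming network access to a
# public checkpoint and a local test image (the file name is hypothetical):
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor
    demo_checkpoint = "openmmlab/upernet-convnext-tiny"
    demo_processor = AutoImageProcessor.from_pretrained(demo_checkpoint)
    demo_model = UperNetForSemanticSegmentation.from_pretrained(demo_checkpoint)
    demo_image = Image.open("scene.png").convert("RGB")
    demo_inputs = demo_processor(images=demo_image, return_tensors="pt")
    with torch.no_grad():
        demo_logits = demo_model(**demo_inputs).logits  # (batch, num_labels, height, width)
    print(demo_logits.shape)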
| 87 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
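        # with the defaults above (patch_size=2, num_mel_bins=16, max_length=24, both strides 2) this gives
        # (16 - 2) // 2 + 1 = 8 frequency patches and (24 - 2) // 2 + 1 = 12 time patches, i.e. seq_length = 96 + 2 = 98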
    def prepare_config_and_inputs( self ):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case ( self : Tuple ):
lowercase__ : str = ASTModelTester(self )
lowercase__ : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def snake_case ( self : List[Any] ):
pass
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : List[str] ):
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ["input_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[Any] ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_audio( ):
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_feature_extractor( self ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
    def test_inference_audio_classification( self ):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(torch_device )
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 130 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
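# Note: nothing below is imported eagerly; _LazyModule (bound at the bottom of the file)
# resolves each of these names on first attribute access.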
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 130 | 1 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
UpperCAmelCase = logging.getLogger(__name__)
UpperCAmelCase = '''Hello world! cécé herlolip'''
UpperCAmelCase = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def lowerCamelCase (a_ :str , a_ :List[str]) -> Union[str, Any]:
lowercase :Any = BertAbsConfig(
temp_dir='''.''' , finetune_bert=a_ , large=a_ , share_emb=a_ , use_bert_emb=a_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    lowercase :List[str] = torch.load(a_ , map_location=lambda storage , loc : storage)
lowercase :List[str] = AbsSummarizer(a_ , torch.device('''cpu''') , a_)
original.eval()
lowercase :Optional[Any] = BertAbsSummarizer(a_ , torch.device('''cpu'''))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''')
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''')
lowercase :Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''')
# prepare the model inputs
lowercase :int = tokenizer.encode('''This is sample éàalj\'-.''')
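    # pad the token ids up to the model's fixed maximum length of 512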
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(a_)))
lowercase :List[Any] = torch.tensor(a_).unsqueeze(0)
lowercase :Optional[Any] = tokenizer.encode('''This is sample 3 éàalj\'-.''')
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(a_)))
lowercase :Any = torch.tensor(a_).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
lowercase :str = encoder_input_ids
lowercase :Any = decoder_input_ids
lowercase :List[str] = None
lowercase :str = None
lowercase :str = None
lowercase :str = None
lowercase :Optional[int] = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowercase :Dict = original(a_ , a_ , a_ , a_ , a_ , a_ , a_)[0]
lowercase :List[str] = original.generator(a_)
lowercase :Dict = new_model(
a_ , a_ , a_ , a_ , a_)[0]
lowercase :Any = new_model.generator(a_)
lowercase :Dict = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(a_))
lowercase :Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(a_))
lowercase :str = torch.allclose(a_ , a_ , atol=1E-3)
if are_identical:
logging.info('''all weights are equal up to 1e-3''')
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''')
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''')
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
UpperCAmelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 172 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def get_yolos_config(yolos_name :str) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''') , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict :dict , config :YolosConfig , base_model :bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F"""encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
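# rename_key (below) maps an original YOLOS/timm parameter name onto the Hugging Face
# naming scheme, e.g. 'backbone.cls_token' -> 'vit.embeddings.cls_token' (illustrative).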
def rename_key(name :str) -> str:
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''')
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''')
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''')
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''')
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''')
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''')
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''')
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''')
    return name
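# convert_state_dict (below) slices every fused qkv tensor into equal thirds
# (query, key, value) and simply renames all other keys via rename_key.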
def convert_state_dict(orig_state_dict :dict , model :YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img() -> torch.Tensor:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name :str , checkpoint_path :str , pytorch_dump_folder_path :str , push_to_hub :bool = False) -> None:
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''')['''model''']
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''' , size=size)
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]])
        expected_slice_boxes = torch.tensor(
            [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]])
        expected_slice_boxes = torch.tensor(
            [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]])
        expected_slice_boxes = torch.tensor(
            [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]])
        expected_slice_boxes = torch.tensor(
            [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]])
        expected_slice_boxes = torch.tensor(
            [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]])
    else:
        raise ValueError(F"""Unknown yolos_name: {yolos_name}""")
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4)
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''')
        model.push_to_hub(model_name , organization='''hustvl''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 172 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : Dict ):
__A = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
__A = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
__A = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
__A = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__A = model(A )["last_hidden_state"].detach()
self.assertEqual(output.shape ,A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,A ,atol=1E-3 ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
__A = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
__A = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
__A = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
__A = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__A = model(A )["last_hidden_state"].detach()
self.assertEqual(output.shape ,A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,A ,atol=1E-3 ) )
| 15 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__(a_ ):
"""simple docstring"""
def __init__( self , *_lowercase , _lowercase=None , _lowercase=None , **_lowercase ) -> Optional[Any]:
super().__init__(*_lowercase , **_lowercase )
a_ : Optional[int] = eval_examples
a_ : Tuple = post_process_function
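    # This Trainer subclass is tailored to extractive QA: it runs the raw evaluation loop first,
    # then uses post_process_function to turn start/end logits into text answers before
    # compute_metrics scores them.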
def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase = "eval" ) -> Union[str, Any]:
a_ : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
a_ : List[str] = self.get_eval_dataloader(_lowercase )
a_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a_ : Optional[int] = self.compute_metrics
a_ : List[str] = None
a_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Any = time.time()
try:
a_ : Union[str, Any] = eval_loop(
_lowercase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
a_ : Dict = compute_metrics
a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
a_ : List[Any] = self.post_process_function(_lowercase , _lowercase , output.predictions )
a_ : Optional[Any] = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ : List[str] = metrics.pop(_lowercase )
metrics.update(output.metrics )
else:
a_ : List[Any] = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(_lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowercase )
return metrics
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase=None , _lowercase = "test" ) -> str:
a_ : Tuple = self.get_test_dataloader(_lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
a_ : List[Any] = self.compute_metrics
a_ : int = None
a_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Union[str, Any] = time.time()
try:
a_ : List[str] = eval_loop(
_lowercase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
a_ : Optional[Any] = compute_metrics
a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a_ : Optional[int] = self.post_process_function(_lowercase , _lowercase , output.predictions , """predict""" )
a_ : List[Any] = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ : int = metrics.pop(_lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowercase )
| 248 | 0 |
from __future__ import annotations
def encode( plain: str ) -> list[int]:
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded: list[int] ) -> str:
    return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
    encoded = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , encoded )
    print('Decoded: ' , decode(encoded ) )
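# Quick sanity check (illustrative, assuming lowercase ASCII input):
#   encode("abc") == [1, 2, 3] and decode([1, 2, 3]) == "abc"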
if __name__ == "__main__":
main()
| 359 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 84 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : List[Any] = 'data2vec-text'
def __init__( self : Tuple ,_UpperCAmelCase : int=30522 ,_UpperCAmelCase : Tuple=768 ,_UpperCAmelCase : Optional[int]=12 ,_UpperCAmelCase : List[str]=12 ,_UpperCAmelCase : Any=3072 ,_UpperCAmelCase : List[str]="gelu" ,_UpperCAmelCase : Optional[Any]=0.1 ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : int=512 ,_UpperCAmelCase : Tuple=2 ,_UpperCAmelCase : str=0.02 ,_UpperCAmelCase : int=1E-12 ,_UpperCAmelCase : Tuple=1 ,_UpperCAmelCase : Tuple=0 ,_UpperCAmelCase : Any=2 ,_UpperCAmelCase : Any="absolute" ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : List[str]=None ,**_UpperCAmelCase : Dict ,):
super().__init__(pad_token_id=_UpperCAmelCase ,bos_token_id=_UpperCAmelCase ,eos_token_id=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Optional[Any] = vocab_size
_a : Dict = hidden_size
_a : Tuple = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Union[str, Any] = hidden_act
_a : Optional[int] = intermediate_size
_a : Optional[int] = hidden_dropout_prob
_a : List[str] = attention_probs_dropout_prob
_a : List[Any] = max_position_embeddings
_a : str = type_vocab_size
_a : Union[str, Any] = initializer_range
_a : Optional[int] = layer_norm_eps
_a : Optional[Any] = position_embedding_type
_a : Dict = use_cache
_a : Any = classifier_dropout
class __magic_name__ ( _UpperCamelCase ):
@property
def __lowercase ( self : Union[str, Any] ):
if self.task == "multiple-choice":
_a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_a : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 89 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( string1: str , string2: str ) -> str | Literal[False]:
    list1 = list(string1 )
    list2 = list(string2 )
    count = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1 )
def check( binary: list[str] ) -> list[str]:
    pi = []
    while True:
        check1 = ['$'] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X' )
        for i in range(len(binary ) ):
            if check1[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable: int , minterms: Sequence[float] ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( string1: str , string2: str , count: int ) -> bool:
    list1 = list(string1 )
    list2 = list(string2 )
    count_n = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection( chart: list[list[int]] , prime_implicants: list[str] ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants: list[str] , binary: list[str] ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count('_' )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n' ) )
    minterms = [
        float(x )
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print('Prime Implicants are:' )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print('Essential Prime Implicants are:' )
    print(essential_prime_implicants )
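# Run interactively: enter the number of variables, then the minterm values separated by spaces.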
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 364 | """simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    """<""": operator.lt,
    """<=""": operator.le,
    """==""": operator.eq,
    """!=""": operator.ne,
    """>=""": operator.ge,
    """>""": operator.gt,
}
def _compare_versions( op , got_ver , want_ver , requirement , pkg , hint ):
    if got_ver is None or want_ver is None:
        raise ValueError(
            F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            F''' reinstalling {pkg}.''' )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def require_version( requirement: str , hint: Optional[str] = None ) -> None:
    hint = F'''\n{hint}''' if hint is not None else ''''''
    # non-versioned check
    if re.match(R'''^[\w_\-\d]+$''' , requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , requirement )
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
                F''' got {requirement}''' )
        pkg , want_full = match[0]
        want_range = want_full.split(''',''' ) # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , w )
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
                    F''' but got {requirement}''' )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
    # special case
    if pkg == "python":
        got_ver = '''.'''.join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core( requirement ):
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement , hint )
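# Usage sketch (illustrative):
#   require_version("numpy>=1.17")                       # raises ImportError if the installed numpy is older
#   require_version_core("tokenizers>=0.11.1,!=0.11.3")  # same check, with a hint pointing at the dev install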
| 54 | 0 |
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a = 'main'
# Default branch name
a = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
a = 'aaaaaaa'
# This commit does not exist, so we should 404.
a = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
a = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def _snake_case ( ) -> List[Any]:
'''simple docstring'''
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def _snake_case ( ) -> int:
'''simple docstring'''
print('Bonjour!' )
yield
print('Au revoir!' )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_no_context( self , mock_stdout ):
        with ContextManagers([] ):
            print('Transformers are awesome!' )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_one_context( self , mock_stdout ):
        with ContextManagers([context_en()] ):
            print('Transformers are awesome!' )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_two_context( self , mock_stdout ):
        with ContextManagers([context_fr(), context_en()] ):
            print('Transformers are awesome!' )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
    @require_torch
    def test_find_labels_pt( self ):
        self.assertEqual(find_labels(BertForSequenceClassification ) , ['labels'] )
        self.assertEqual(find_labels(BertForPreTraining ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ['start_positions', 'end_positions'] )
        class DummyModel(BertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ['labels'] )
    @require_tf
    def test_find_labels_tf( self ):
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ['labels'] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ['start_positions', 'end_positions'] )
        class DummyModel(TFBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ['labels'] )
    @require_flax
    def test_find_labels_flax( self ):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
        class DummyModel(FlaxBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , [] )
| 315 |
"""simple docstring"""
import math
import sys
def read_file_binary( file_path ):
    result = ""
    try:
        with open(file_path , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def decompress_data( data_bits ):
    lexicon = {"0": "0", "1": "1"}
    result , curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index ).is_integer():
            # when index reaches a power of two, every key grows one bit wider
            # (the "0" prefix below is reconstructed from the standard algorithm)
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary( file_path , to_write ):
    byte_length = 8
    try:
        with open(file_path , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def remove_prefix( data_bits ):
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path , destination_path ):
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
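# Usage (illustrative): python lempel_ziv_decompress.py <compressed_file> <destination_file>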
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 292 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : List[str] = TransfoXLTokenizer
snake_case__ : Any = False
snake_case__ : Optional[int] = False
def _A ( self : int ):
super().setUp()
UpperCamelCase :Dict = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _A ( self : List[Any] , **__lowerCamelCase : int ):
        __lowerCamelCase["""lower_case"""] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _A ( self : List[str] , __lowerCamelCase : Optional[Any] ):
UpperCamelCase :List[Any] = """<unk> UNwanted , running"""
UpperCamelCase :Optional[Any] = """<unk> unwanted, running"""
return input_text, output_text
def _A ( self : str ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize("""<unk> UNwanted , running""" )
        self.assertListEqual(tokens , ["""<unk>""", """unwanted""", """,""", """running"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
def _A ( self : int ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def _A ( self : Dict ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _A ( self : Union[str, Any] ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
        tokens_out = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
def _A ( self : str ):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(["""new1""", """new2"""] )
        tokenizer.move_added_token("""new1""" , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 62 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : Optional[int] = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 62 | 1 |
def solution( lowercase_ = 600_851_475_143 ) -> int:
    """simple docstring"""
    try:
        n = int(lowercase_ )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
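# e.g. solution(13195) == 29; the default input yields 6857 (Project Euler problem 3)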
if __name__ == "__main__":
print(F'''{solution() = }''')
| 14 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
return AutoConfig.from_pretrained(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase__):
create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
| 14 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
__snake_case = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
__snake_case = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
__snake_case = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
'''simple docstring'''
from __future__ import annotations
import math
def is_prime( number: int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums( n: int ) -> list[int]:
    '''simple docstring'''
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate( n: int ) -> bool:
    '''simple docstring'''
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes( count: int = 11 ) -> list[int]:
    '''simple docstring'''
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution() -> int:
    '''simple docstring'''
    return sum(compute_truncated_primes(11 ) )
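# the eleven two-sided truncatable primes (23 ... 739397) sum to 748317 (Project Euler problem 37)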
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""") | 219 | 0 |
'''simple docstring'''
def solution( max_base: int = 10 , max_power: int = 22 ) -> int:
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
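# counts n-digit integers that are also an nth power; solution() == 49 for the default limits (Project Euler problem 63)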
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 63 |
def _lowercase ( a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
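# e.g. _lowercase(25 , 32 ) == '0b111001'  (011001 OR 100000)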
if __name__ == "__main__":
import doctest
doctest.testmod()
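# Usage examples: 25 = 0b11001 and 32 = 0b100000, so the bitwise OR is 0b111001 (57).
assert binary_or(25, 32) == "0b111001"
assert binary_or(0, 1) == "0b1"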
| 275 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--albert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained ALBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
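# Hedged example invocation (all paths are hypothetical placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin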
| 261 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
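# The _LazyModule above defers the heavy torch/tf imports until an attribute is
# first accessed. A minimal sketch of the idea (simplified; the real class lives
# in transformers.utils and handles more cases):
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only called when the attribute is not already set, i.e. on first access
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        return getattr(module, attr)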
| 261 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
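# Hedged example: run these collective-op tests on two processes with the
# accelerate launcher (the script filename is a placeholder):
#   accelerate launch --num_processes 2 test_operations.py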
| 246 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
lowerCamelCase__ : List[str] = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
lowerCamelCase__ : str = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
lowerCamelCase__ : Union[str, Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        '''simple docstring'''
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
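# For intuition, the binary-case MCC can be computed straight from the confusion
# matrix; this hedged sketch should agree with sklearn.metrics.matthews_corrcoef:
import math


def binary_mcc(tp: int, tn: int, fp: int, fn: int) -> float:
    denominator = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0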
| 246 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
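# Usage examples:
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(2) == "0b10"
assert decimal_to_binary(-5) == "-0b101"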
| 273 | 1 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """Wrapper that exposes timm models through the transformers backbone API."""

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """Empty init weights function to ensure compatibility of the class in the library."""
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
            feature_maps = tuple(feature_maps)

        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
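# Hedged usage sketch (requires the `timm` package; the backbone name is
# illustrative only):
# from transformers import TimmBackbone, TimmBackboneConfig
# config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
# backbone = TimmBackbone(config)
# outputs = backbone(pixel_values)  # BackboneOutput with .feature_maps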
| 28 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampling from Karras et al., tailored to variance-expanding models.
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: torch.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
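# Note on set_timesteps above: the schedule evaluates
#   sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1))
# at each timestep i, i.e. a geometric interpolation between sigma_max**2 (i = 0)
# and sigma_min**2 (i = N - 1), so the noise level shrinks over sampling.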
| 28 | 1 |
def capitalize_each_alpha(txt: str) -> list:
    '''Return copies of `txt`, each with exactly one alphabetic character upper-cased.'''
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
    __import__('doctest').testmod() | 284 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 284 | 1 |
'''simple docstring'''


def count_inversions_bf(arr):
    '''Brute-force inversion count: check every pair.'''
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    '''Merge-sort based inversion count; returns (sorted array, inversions).'''
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)


if __name__ == "__main__":
    main()
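# Complexity note: count_inversions_bf checks every pair, O(n**2), while the
# recursive version counts cross inversions during an O(n log n) merge sort.
sorted_arr, inversions = count_inversions_recursive([3, 1, 2])
assert sorted_arr == [1, 2, 3] and inversions == 2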
| 324 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = 'convnextv2'

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 52 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 359 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''yjernite/retribert-base-uncased''': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 34 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'''accelerate configuration saved at {config_file}''')


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
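# Hedged example invocations; without --config_file the answers are written to the
# default cache location (typically ~/.cache/huggingface/accelerate/default_config.yaml):
#   accelerate config
#   accelerate config --config_file ./my_config.yaml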
| 48 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())})
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})


def main():
    """simple docstring"""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overcome.' )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
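# Hedged example run ("swag" is assumed to be one of the tasks registered in
# utils_multiple_choice.py; paths are placeholders):
#   python run_multiple_choice.py \
#       --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./swag --output_dir ./swag_out \
#       --do_train --do_eval --max_seq_length 80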
| 161 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
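# Hedged example invocation for a pretraining-only checkpoint (paths are
# hypothetical placeholders):
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --config_path ./config.json --not_finetuned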
| 117 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : torch.FloatTensor
UpperCAmelCase__ : Optional[torch.FloatTensor] = None
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int=0.999 , SCREAMING_SNAKE_CASE__ : str="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE__ : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__UpperCamelCase =[]
for i in range(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =i / num_diffusion_timesteps
__UpperCamelCase =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE__ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
return torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.floataa )
class UnCLIPScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , num_train_timesteps = 1000 , variance_type = "fixed_small_log" , clip_sample = True , clip_sample_range = 1.0 , prediction_type = "epsilon" , beta_schedule = "squaredcos_cap_v2" , ) -> None:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps , device = None ) -> None:
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1E-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , model_output , timestep , sample , prev_timestep = None , generator=None , return_dict = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
                ' for the UnCLIPScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
                    ' for the UnCLIPScheduler.' )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
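# Minimal usage sketch of the scheduler above (a random tensor stands in for a
# real denoising model's output; `diffusers` ships this class as `UnCLIPScheduler`):
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1000 , variance_type="fixed_small_log" )
    scheduler.set_timesteps(25 )
    sample = torch.randn(1 , 3 , 64 , 64 )
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample )  # stand-in for model(sample, t)
        sample = scheduler.step(model_output , t , sample ).prev_sample
    print(sample.shape )  # torch.Size([1, 3, 64, 64])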
| 117 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[Any] = logging.get_logger(__name__)
a_ : Union[str, Any] = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class _snake_case ( PretrainedConfig ):
    model_type = """ctrl"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=24_6534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
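# Usage sketch of the config above (published in `transformers` as `CTRLConfig`):
if __name__ == "__main__":
    config = _snake_case(n_embd=512 , n_layer=12 , n_head=8 )
    # the attribute_map above routes `hidden_size` to `n_embd`
    print(config.hidden_size )  # 512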
| 137 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig ( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = """swin"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
class SwinOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1E-4
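# Quick check sketch: the final hidden size computed in __init__ above is
# embed_dim * 2 ** (num_stages - 1).
if __name__ == "__main__":
    config = SwinConfig(embed_dim=96 , depths=[2, 2, 6, 2] )
    assert config.hidden_size == 96 * 2 ** 3  # 768 after four stages
    print(config.stage_names )  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']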
| 317 | 0 |
def validate_initial_digits(credit_card_number: str ) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def luhn_validation(credit_card_number: str ) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str ) -> bool:
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F'''{error_message} of its length.''' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F'''{error_message} of its first two digits.''' )
        return False
    if not luhn_validation(credit_card_number ):
        print(F'''{error_message} it fails the Luhn check.''' )
        return False
    print(F'''{credit_card_number} is a valid credit card number.''' )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
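    # Worked example of the Luhn doubling above (hand computation): in
    # "4111111111111111", every second digit from the right is doubled
    # (4 -> 8 and seven 1s -> 2 each), giving 8 + 7 * 2 = 22; the eight
    # untouched 1s add 8, for a total of 30, and 30 % 10 == 0, so the number
    # passes luhn_validation. Changing the last digit to 2 makes the total 31,
    # which fails the check.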
| 328 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCamelCase ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , *,
        clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 768 , time_embed_dim: int , cross_attention_dim , ) -> None:
        """simple docstring"""
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        """simple docstring"""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
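# Shape sketch of the projection above (hypothetical sizes; the names are the
# keyword-only arguments of forward):
#   image_embeddings:           (batch, clip_embeddings_dim)
#   prompt_embeds:              (batch, clip_embeddings_dim)
#   text_encoder_hidden_states: (batch, seq_len, cross_attention_dim)
# returns:
#   text_encoder_hidden_states:    (batch, clip_extra_context_tokens + seq_len, cross_attention_dim)
#   additive_clip_time_embeddings: (batch, time_embed_dim)
# With do_classifier_free_guidance=True, the image-embedding batch is doubled by
# prepending the learned unconditional embedding.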
| 328 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class snake_case_ ( unittest.TestCase ):
    def setUp( self ) -> None:
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ) -> None:
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ) -> None:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor( self ) -> None:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_20_00] )
        audio_dict = feature_extractor(audio , return_tensors="np" )
        input_processor = processor(audio=audio , return_tensors="np" )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_image_processor( self ) -> None:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 2_24, 2_24] )
        image_dict = image_processor(images , return_tensors="np" )
        input_processor = processor(images=images , return_tensors="np" )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_processor( self ) -> None:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_20_00] )
        images = np.ones([3, 2_24, 2_24] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self ) -> None:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
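# Usage pattern the tests above exercise (a sketch; requires downloading the
# "ZinengTang/tvlt-base" checkpoint from the Hub):
if __name__ == "__main__":
    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    inputs = processor(audio=np.ones([1_20_00]), images=np.ones([3, 2_24, 2_24]))
    print(sorted(inputs.keys()))  # audio_mask, audio_values, pixel_mask, pixel_values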
| 87 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Dict):
lowercase__ : List[Any] = tmp_path / "cache"
lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : List[Any] = features.copy() if features else default_expected_features
lowercase__ : List[Any] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : List[str]):
lowercase__ : Optional[Any] = tmp_path / "cache"
lowercase__ : Tuple = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowercase__ : List[Any] = features.copy() if features else default_expected_features
lowercase__ : int = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int]):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowercase__ : Any = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
lowercase__ : str = features.copy()
lowercase__ : str = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]):
lowercase__ : Union[str, Any] = tmp_path / "cache"
lowercase__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int):
if issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Tuple = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : str = [jsonl_path]
lowercase__ : str = tmp_path / "cache"
lowercase__ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=("train",)):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
for split in splits:
lowercase__ : Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str):
lowercase__ : List[str] = tmp_path / "cache"
lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str]):
lowercase__ : str = tmp_path / "cache"
lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Tuple = features.copy() if features else default_expected_features
lowercase__ : Union[str, Any] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Tuple = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
if split:
lowercase__ : Tuple = {split: jsonl_path}
else:
lowercase__ : Tuple = "train"
lowercase__ : int = {"train": jsonl_path, "test": jsonl_path}
lowercase__ : Dict = tmp_path / "cache"
lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def load_json( buffer):
    return json.load(buffer)
def load_json_lines( buffer):
    return [json.loads(line) for line in buffer]
class snake_case_ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
buffer.seek(0 )
lowercase__ : Optional[int] = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
buffer.seek(0 )
lowercase__ : str = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
lowercase__ : str = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
lowercase__ : Optional[Any] = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> str:
with pytest.raises(lowercase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] ) -> Any:
lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
lowercase__ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
lowercase__ : List[Any] = f.read()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
lowercase__ : str = f.read()
assert exported_content == original_content
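# Round-trip sketch of the reader/writer under test (hypothetical temp path):
if __name__ == "__main__":
    import tempfile

    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    path = tempfile.mktemp(suffix=".jsonl")
    JsonDatasetWriter(ds, path, lines=True).write()
    reloaded = JsonDatasetReader(path).read()
    print(reloaded.column_names)  # ['col_1', 'col_2']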
| 87 | 1 |
'''simple docstring'''
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser( subparsers=None ):
    parent_parser = argparse.ArgumentParser(add_help=False , allow_abbrev=False )
    # The main config parser
    config_parser = config_command_parser(subparsers )
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' )
    # Then add other parsers with the parent parser
    default_command_parser(subcommands , parents=[parent_parser] )
    update_command_parser(subcommands , parents=[parent_parser] )
    return config_parser
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args , '''func''' ):
        config_parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
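# CLI sketch of what the parser above wires together (subcommand names as
# registered by the imported command parsers):
#   accelerate config            # interactive questionnaire (main parser)
#   accelerate config default    # write a default config file
#   accelerate config update     # migrate an existing config file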
| 370 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text: str ) -> None:
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text( text: str ) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
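# Worked example (hand computation, not executed): for the text "aab" the
# single-character counts are {"a": 2, "b": 1} over 3 characters, so the
# first-order entropy is -(2/3)*log2(2/3) - (1/3)*log2(1/3) ≈ 0.918 bits
# per character, which calculate_prob prints rounded to one decimal.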
| 299 | 0 |
import copy
import re
class TrialShortNamer:
    PREFIX = """hp"""
    DEFAULTS = {}
    NAMING_INFO = None
    @classmethod
    def set_defaults(cls , prefix , defaults ):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info , word ):
        if len(word ) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word ):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(word ) + 1 ):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer ):
                s = ''''''
                while integer != 0:
                    s = chr(ord('''A''' ) + integer % 10 ) + s
                    integer //= 10
                return s
            i = 0
            while True:
                sword = word + '''#''' + int_to_alphabetic(i )
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter so the fallback terminates
                    continue
                else:
                    short_word = sword
                    break
        info['''short_word'''][word] = short_word
        info['''reverse_short_word'''][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info , param_name ):
        words = param_name.split('''_''' )
        shortname_parts = [TrialShortNamer.shortname_for_word(info , word ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ['''''', '''_''']
        for separator in separators:
            shortname = separator.join(shortname_parts )
            if shortname not in info["reverse_short_param"]:
                info['''short_param'''][param_name] = shortname
                info['''reverse_short_param'''][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name(info , param_name ):
        short_name = TrialShortNamer.shortname_for_key(info , param_name )
        info['''short_param'''][param_name] = short_name
        info['''reverse_short_param'''][short_name] = param_name
    @classmethod
    def build_naming_info(cls ):
        if cls.NAMING_INFO is not None:
            return
        info = {
            '''short_word''': {},
            '''reverse_short_word''': {},
            '''short_param''': {},
            '''reverse_short_param''': {},
        }
        field_keys = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(info , k )
        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls , params ):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO['''short_param'''][k]
            if isinstance(v , bool ):
                v = 1 if v else 0
            sep = '''''' if isinstance(v , (int, float) ) else '''-'''
            e = f"""{key}{sep}{v}"""
            name.append(e )
        return "_".join(name )
    @classmethod
    def parse_repr(cls , repr ):
        repr = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split('''_''' )
        parameters = {}
        for value in values:
            if "-" in value:
                p_k , p_v = value.split('''-''' )
            else:
                p_k = re.sub('''[0-9.]''' , '''''' , value )
                p_v = float(re.sub('''[^0-9.]''' , '''''' , value ) )
            key = cls.NAMING_INFO['''reverse_short_param'''][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
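# Usage sketch (hypothetical defaults): encode a trial's non-default
# hyperparameters into a short name and decode it back.
if __name__ == "__main__":
    class RunNamer(TrialShortNamer):
        PREFIX = "run"
        DEFAULTS = {"learning_rate": 0.001, "batch_size": 32}

    name = RunNamer.shortname({"learning_rate": 0.01, "batch_size": 32})
    print(name)                       # e.g. "run_lr0.01" (short keys depend on collisions)
    print(RunNamer.parse_repr(name))  # {'learning_rate': 0.01, 'batch_size': 32}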
| 117 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Optional[Any] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
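# Effect of the lazy structure above (a sketch): `import transformers` stays
# cheap, and the torch-backed submodule is only imported when an attribute is
# first touched, e.g.:
#   from transformers import IBertConfig   # resolves configuration_ibert lazily
#   from transformers import IBertModel    # resolves modeling_ibert on demand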
| 117 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name ):
    """simple docstring"""
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=10_00 , id2label=id2label , label2id=label2id , )
    return config
def rename_key(name ):
    """simple docstring"""
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' , '''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('''Logits:''' , logits[0, :3] )
    print('''Predicted class:''' , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
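    # Example invocation (hypothetical output directory):
    #   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
    #       --pytorch_dump_folder_path ./bit-50 --push_to_hub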
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 215 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["pixel_values"]
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = None , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , __A = True , **__A , ) -> None:
super().__init__(**__A )
a =size if size is not None else {'''shortest_edge''': 224}
a =get_size_dict(__A , default_to_square=__A )
a =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
a =get_size_dict(__A , default_to_square=__A , param_name='''crop_size''' )
a =do_resize
a =size
a =resample
a =do_center_crop
a =crop_size
a =do_rescale
a =rescale_factor
a =do_normalize
a =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a =image_std if image_std is not None else OPENAI_CLIP_STD
a =do_convert_rgb
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray:
a =get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
a =get_resize_output_image_size(__A , size=size['''shortest_edge'''] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A , ) -> np.ndarray:
a =get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__A , size=(size['''height'''], size['''width''']) , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A , ) -> Any:
return rescale(__A , scale=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image:
a =do_resize if do_resize is not None else self.do_resize
a =size if size is not None else self.size
a =get_size_dict(__A , param_name='''size''' , default_to_square=__A )
a =resample if resample is not None else self.resample
a =do_center_crop if do_center_crop is not None else self.do_center_crop
a =crop_size if crop_size is not None else self.crop_size
a =get_size_dict(__A , param_name='''crop_size''' , default_to_square=__A )
a =do_rescale if do_rescale is not None else self.do_rescale
a =rescale_factor if rescale_factor is not None else self.rescale_factor
a =do_normalize if do_normalize is not None else self.do_normalize
a =image_mean if image_mean is not None else self.image_mean
a =image_std if image_std is not None else self.image_std
a =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a =make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a =[convert_to_rgb(__A ) for image in images]
# All transformations expect numpy arrays.
a =[to_numpy_array(__A ) for image in images]
if do_resize:
a =[self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
a =[self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
a =[self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
a =[self.normalize(image=__A , mean=__A , std=__A ) for image in images]
a =[to_channel_dimension_format(__A , __A ) for image in images]
a ={'''pixel_values''': images}
        return BatchFeature(data=__A , tensor_type=__A )
| 215 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 208 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self : Tuple , _a : List[Any] , _a : Dict=2 , _a : Dict=32 , _a : int=16 , _a : str=3 , _a : Optional[int]=True , _a : List[Any]=True , _a : int=32 , _a : int=4 , _a : Optional[Any]=[0, 1, 2, 3] , _a : int=4 , _a : Union[str, Any]=37 , _a : List[str]="gelu" , _a : List[str]=0.1 , _a : List[str]=0.1 , _a : Union[str, Any]=0.02 , _a : str=3 , _a : int=[1, 384, 24, 24] , _a : Optional[Any]=True , _a : Tuple=None , ) -> Tuple:
__lowerCamelCase : Dict = parent
__lowerCamelCase : List[Any] = batch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : Any = patch_size
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : Dict = is_training
__lowerCamelCase : List[str] = use_labels
__lowerCamelCase : Union[str, Any] = hidden_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : List[Any] = backbone_out_indices
__lowerCamelCase : Tuple = num_attention_heads
__lowerCamelCase : Optional[Any] = intermediate_size
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : List[str] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : Dict = num_labels
__lowerCamelCase : List[Any] = backbone_featmap_shape
__lowerCamelCase : Optional[int] = scope
__lowerCamelCase : str = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : Union[str, Any] = (image_size // patch_size) ** 2
__lowerCamelCase : Optional[Any] = num_patches + 1
def _lowercase ( self : Dict ) -> Any:
__lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : List[str] = None
if self.use_labels:
__lowerCamelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Optional[Any] ) -> List[str]:
__lowerCamelCase : Optional[Any] = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_a , backbone_featmap_shape=self.backbone_featmap_shape , )
def _lowercase ( self : Optional[int] , _a : int , _a : str , _a : Optional[int] ) -> Optional[int]:
__lowerCamelCase : Any = DPTModel(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : List[Any] ) -> List[Any]:
__lowerCamelCase : Dict = self.num_labels
__lowerCamelCase : List[Any] = DPTForDepthEstimation(_a )
model.to(_a )
model.eval()
__lowerCamelCase : List[str] = model(_a )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _lowercase ( self : List[str] , _a : Optional[Any] , _a : Tuple , _a : Tuple ) -> List[Any]:
__lowerCamelCase : Union[str, Any] = self.num_labels
__lowerCamelCase : Optional[Any] = DPTForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__lowerCamelCase : int = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCamelCase : Tuple = self.prepare_config_and_inputs()
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase : str = config_and_inputs
__lowerCamelCase : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
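# Hedged usage note: with `transformers` installed and a CUDA device available, the slow
# integration test above can be run on its own. The test-file path below is an assumption;
# only the "Intel/dpt-hybrid-midas" checkpoint id comes from the code above.
#
#   RUN_SLOW=1 pytest tests/models/dpt/test_modeling_dpt_hybrid.py -k "integration"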
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    """Converts a byte count to whole mebibytes (MiB)."""
    return int(x / 2**20)
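# Quick sanity check for the byte -> MiB helper above (hedged, illustrative values):
# bamb(3 * 2**20) == 3, and partial mebibytes are truncated: bamb(2**20 + 1) == 1.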
# This context manager tracks the peak GPU memory used between __enter__ and __exit__
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
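# Hedged launch sketch: this script is meant to be started through `accelerate launch`
# with a DeepSpeed config. The yaml/py file names are placeholders; the CLI flags are
# exactly the ones defined in `main()` above.
#
#   accelerate launch --config_file deepspeed_config.yaml peak_memory_usage.py \
#       --model_name_or_path bert-base-cased --num_epochs 1 --output_dir .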
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
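# Minimal usage sketch (hedged; assumes the upstream ResNetConfig API shown above):
# >>> config = ResNetConfig(depths=[2, 2], hidden_sizes=[64, 128], layer_type="basic")
# >>> config.layer_type
# 'basic'
# >>> config.stage_names
# ['stem', 'stage1', 'stage2']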
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''openmmlab/upernet-convnext-tiny''',
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = '''UperNetConfig'''
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles a conv layer with a batch-norm layer and a ReLU activation.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head: combines a Pyramid Pooling Module with an FPN on top of
    the backbone features.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """
    Auxiliary fully-convolutional head applied to one of the intermediate backbone feature maps.
    """

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """simple docstring"""

    config_class = UperNetConfig
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.', UPERNET_START_DOCSTRING
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one')
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
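# Hedged usage sketch for the model above; "openmmlab/upernet-convnext-tiny" is the
# checkpoint already referenced in this file, everything else is illustrative:
# >>> model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
# >>> # outputs = model(pixel_values=pixel_values)
# >>> # outputs.logits has shape (batch_size, num_labels, height, width)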
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse:
    status_code = 200
    headers = {'Content-Length': '100'}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, 'utf-8')]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize('urls_type', [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, 'request', mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {'train': url}
    dataset_name = 'dummy'
    cache_subdir = 'downloads'
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('.json')
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type', [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {'train': filename}
    dataset_name = 'dummy'
    cache_dir = xz_file.parent
    extracted_subdir = 'extracted'
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith('.jsonl')
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode('utf-8'))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize('archive_jsonl', ['tar_jsonl_path', 'zip_jsonl_path'])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize('archive_nested_jsonl', ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
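# Hedged note: the functions above are plain pytest tests that rely on the `datasets`
# test fixtures (xz_file, text_file, tar_jsonl_path, ...). The file path is an assumption:
#
#   pytest tests/test_download_manager.py -q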
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs)
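# Minimal usage sketch (hedged; assumes the upstream LxmertConfig API shown above):
# >>> config = LxmertConfig(l_layers=2, x_layers=1, r_layers=2)
# >>> config.num_hidden_layers
# {'vision': 2, 'cross_encoder': 1, 'language': 2}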
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
    def copy(self):
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def solution(max_perimeter: int = 10**9) -> int:
    '''simple docstring'''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
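# The loop above walks a Pell-like recurrence: the perimeters it accumulates are
# 16, 50, 196, 722, ... which correspond to the almost-equilateral Heronian triangles
# (5, 5, 6), (17, 17, 16), (65, 65, 66), (241, 241, 240), ...
# (hand-checked against the recurrence above).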
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
def twos_complement(number: int) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """simple docstring"""
    return sum(int(x) for x in str(factorial(num)))
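# Hand check (hedged): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
# >>> solution(10)
# 27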
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size['shortest_edge'] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size['height'] / crop_pct)
                else:
                    scale_size = (int(size['height'] / crop_pct), int(size['width'] / crop_pct))
            else:
                raise ValueError('Invalid size for resize: {}'.format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size['height'], size['width'])
            else:
                raise ValueError('Invalid size for resize: {}'.format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_center_crop and crop_pct is None:
            raise ValueError('Crop_pct must be specified if do_center_crop is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
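# Hedged usage sketch (the class name was reconstructed from the preprocessing logic
# above; the dummy image and output shape are illustrative):
# >>> import numpy as np
# >>> processor = PoolFormerImageProcessor()
# >>> image = np.zeros((512, 512, 3), dtype=np.uint8)
# >>> processor(images=image, return_tensors="np")["pixel_values"].shape
# (1, 3, 224, 224)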
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
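# Example invocation (hedged; the flags are exactly those defined above, paths are placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin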
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """simple docstring"""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
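# Worked check (hedged): a 90-degree arc of a circle with radius 10 has length
# 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.7079632679, which is what the call above prints.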
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            FutureWarning,
        )
        return self.image_processor_class
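# Hedged usage sketch (mirrors the processor API above; the checkpoint id is the
# upstream Chinese-CLIP one and is illustrative here):
# >>> from transformers import ChineseCLIPProcessor
# >>> processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# >>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")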
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class _lowercase :
'''simple docstring'''
def __init__( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase = {}
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=1 ) -> Optional[Any]:
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__lowerCAmelCase = [[w, v]]
if not self.graph.get(SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase = []
def a ( self : Tuple ) -> Optional[int]:
return list(self.graph )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE__ )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]=-2 , SCREAMING_SNAKE_CASE__ : List[Any]=-1 ) -> Optional[int]:
if s == d:
return []
__lowerCAmelCase = []
__lowerCAmelCase = []
if s == -2:
__lowerCAmelCase = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__lowerCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE__ ) != 0:
__lowerCAmelCase = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
__lowerCAmelCase = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return visited
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=-1 ) -> int:
if c == -1:
__lowerCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(SCREAMING_SNAKE_CASE__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
__lowerCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : str=-2 ) -> Tuple:
__lowerCAmelCase = deque()
__lowerCAmelCase = []
if s == -2:
__lowerCAmelCase = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
while d:
__lowerCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
__lowerCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : str=-2 ) -> Union[str, Any]:
__lowerCAmelCase = []
__lowerCAmelCase = []
if s == -2:
__lowerCAmelCase = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = s
__lowerCAmelCase = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(SCREAMING_SNAKE_CASE__ ) != 0:
__lowerCAmelCase = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
__lowerCAmelCase = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return sorted_nodes
def a ( self : Union[str, Any] ) -> str:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = -2
__lowerCAmelCase = []
__lowerCAmelCase = s
__lowerCAmelCase = False
__lowerCAmelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCAmelCase = True
if len(SCREAMING_SNAKE_CASE__ ) != 0:
__lowerCAmelCase = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
__lowerCAmelCase = False
indirect_parents.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = s
__lowerCAmelCase = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return list(SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> Any:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = -2
__lowerCAmelCase = []
__lowerCAmelCase = s
__lowerCAmelCase = False
__lowerCAmelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCAmelCase = True
if len(SCREAMING_SNAKE_CASE__ ) != 0:
__lowerCAmelCase = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
__lowerCAmelCase = False
indirect_parents.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = s
__lowerCAmelCase = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return False
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int]=-2 , SCREAMING_SNAKE_CASE__ : Tuple=-1 ) -> str:
__lowerCAmelCase = time()
self.dfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = time()
return end - begin
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=-2 ) -> Tuple:
__lowerCAmelCase = time()
self.bfs(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = time()
return end - begin
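# Worked example for the directed graph class above (illustrative values, not
# part of the original module): inserting the edges (0 -> 1) and (1 -> 2) with
# weight 1 via the edge-adding method yields the adjacency map
# {0: [[1, 1]], 1: [[1, 2]], 2: []},
# i.e. every vertex maps to a list of [weight, neighbour] pairs, and the DFS,
# BFS and cycle helpers all walk those pairs iteratively with an explicit
# stack or deque rather than recursion.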
class _lowercase :
'''simple docstring'''
def __init__( self : Tuple ) -> Dict:
__lowerCAmelCase = {}
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=1 ) -> Optional[Any]:
# check if u already exists
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
# if there is already an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__lowerCAmelCase = [[w, v]]
# add the edge in the other direction as well
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
# if there is already an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
__lowerCAmelCase = [[w, u]]
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE__ )
# the other way round
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(SCREAMING_SNAKE_CASE__ )
def a ( self : str , SCREAMING_SNAKE_CASE__ : Tuple=-2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=-1 ) -> Optional[Any]:
if s == d:
return []
__lowerCAmelCase = []
__lowerCAmelCase = []
if s == -2:
__lowerCAmelCase = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE__ ) != 0:
__lowerCAmelCase = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
__lowerCAmelCase = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return visited
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict=-1 ) -> Optional[Any]:
if c == -1:
__lowerCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(SCREAMING_SNAKE_CASE__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
__lowerCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=-2 ) -> List[Any]:
__lowerCAmelCase = deque()
__lowerCAmelCase = []
if s == -2:
__lowerCAmelCase = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
while d:
__lowerCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> Any:
return len(self.graph[u] )
def a ( self : Union[str, Any] ) -> Optional[int]:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = -2
__lowerCAmelCase = []
__lowerCAmelCase = s
__lowerCAmelCase = False
__lowerCAmelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCAmelCase = True
if len(SCREAMING_SNAKE_CASE__ ) != 0:
__lowerCAmelCase = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
__lowerCAmelCase = False
indirect_parents.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = s
__lowerCAmelCase = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return list(SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = -2
__lowerCAmelCase = []
__lowerCAmelCase = s
__lowerCAmelCase = False
__lowerCAmelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCAmelCase = True
if len(SCREAMING_SNAKE_CASE__ ) != 0:
__lowerCAmelCase = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
__lowerCAmelCase = False
indirect_parents.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = s
__lowerCAmelCase = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return False
def a ( self : Optional[Any] ) -> Optional[Any]:
return list(self.graph )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]=-2 , SCREAMING_SNAKE_CASE__ : Any=-1 ) -> Optional[int]:
__lowerCAmelCase = time()
self.dfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = time()
return end - begin
def a ( self : str , SCREAMING_SNAKE_CASE__ : Dict=-2 ) -> Dict:
__lowerCAmelCase = time()
self.bfs(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = time()
return end - begin
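# Worked example for the undirected graph class above (illustrative, not part
# of the original module): adding the single edge (0, 1) with weight 1 stores
# it in both directions, producing {0: [[1, 1]], 1: [[1, 0]]}; this mirroring
# is why the degree, DFS and cycle-check helpers only ever need to scan
# self.graph[node].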
| 229 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Any = KandinskyVaaControlnetImgaImgPipeline
_SCREAMING_SNAKE_CASE : Dict = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
_SCREAMING_SNAKE_CASE : List[Any] = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
_SCREAMING_SNAKE_CASE : Dict = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : int ) -> Optional[Any]:
return 32
@property
def a ( self : Union[str, Any] ) -> Dict:
return 32
@property
def a ( self : str ) -> Union[str, Any]:
return self.time_input_dim
@property
def a ( self : Tuple ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def a ( self : Union[str, Any] ) -> List[Any]:
return 1_00
@property
def a ( self : Optional[int] ) -> Any:
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 8,
# out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__lowerCAmelCase = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ )
return model
@property
def a ( self : Tuple ) -> Optional[Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a ( self : Optional[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self : Any ) -> Dict:
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__lowerCAmelCase = DDIMScheduler(**SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str]=0 ) -> Dict:
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE__ )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__lowerCAmelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a ( self : List[Any] ) -> int:
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def a ( self : Any ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__lowerCAmelCase = init_image.resize((5_12, 5_12) )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
__lowerCAmelCase = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__ ) ).float() / 2_5_5.0
__lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__lowerCAmelCase = """A robot, 4k photo"""
__lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(SCREAMING_SNAKE_CASE__ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , strength=0.8_5 , generator=SCREAMING_SNAKE_CASE__ , negative_prompt="""""" , ).to_tuple()
__lowerCAmelCase = pipeline(
image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , hint=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
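# Sketch of the wiring exercised by the slow test above, stated as a comment
# rather than extra code: the prior pipeline maps the prompt and the init
# image to CLIP image embeddings, and the controlnet img2img pipeline then
# denoises latents of the resized 512x512 cat photo conditioned on those
# embeddings and the depth hint, with strength=0.5 controlling how much of
# the original image survives in the output.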
| 229 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__A : Optional[Any] = logging.get_logger(__name__)
__A : Tuple = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __UpperCamelCase ( lowercase__ ):
lowercase : Any = 'imagegpt'
lowercase : List[str] = ['past_key_values']
lowercase : str = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self :Tuple ,_UpperCamelCase :Optional[int]=5_1_2 + 1 ,_UpperCamelCase :Tuple=3_2 * 3_2 ,_UpperCamelCase :Dict=5_1_2 ,_UpperCamelCase :List[Any]=2_4 ,_UpperCamelCase :Any=8 ,_UpperCamelCase :int=None ,_UpperCamelCase :int="quick_gelu" ,_UpperCamelCase :List[str]=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Optional[Any]=0.1 ,_UpperCamelCase :Any=1E-5 ,_UpperCamelCase :Optional[int]=0.02 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :List[str]=True ,_UpperCamelCase :Dict=False ,_UpperCamelCase :int=False ,_UpperCamelCase :Optional[int]=False ,**_UpperCamelCase :Any ,):
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[int] = n_positions
snake_case_ : Any = n_embd
snake_case_ : Optional[int] = n_layer
snake_case_ : Optional[int] = n_head
snake_case_ : Tuple = n_inner
snake_case_ : List[str] = activation_function
snake_case_ : str = resid_pdrop
snake_case_ : Optional[int] = embd_pdrop
snake_case_ : Optional[Any] = attn_pdrop
snake_case_ : Tuple = layer_norm_epsilon
snake_case_ : Optional[Any] = initializer_range
snake_case_ : List[Any] = scale_attn_weights
snake_case_ : Tuple = use_cache
snake_case_ : Dict = scale_attn_by_inverse_layer_idx
snake_case_ : List[str] = reorder_and_upcast_attn
snake_case_ : str = tie_word_embeddings
super().__init__(tie_word_embeddings=_UpperCamelCase ,**_UpperCamelCase )
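# For reference, the defaults above encode ImageGPT's setup: a vocabulary of
# 512 colour clusters plus one start-of-sequence token (hence 512 + 1) and a
# context length of 32 * 32 pixel positions.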
class __UpperCamelCase ( lowercase__ ):
@property
def a__ ( self :int ):
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def a__ ( self :Tuple ,_UpperCamelCase :"FeatureExtractionMixin" ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = -1 ,_UpperCamelCase :bool = False ,_UpperCamelCase :Optional["TensorType"] = None ,_UpperCamelCase :int = 3 ,_UpperCamelCase :int = 3_2 ,_UpperCamelCase :int = 3_2 ,):
snake_case_ : Dict = self._generate_dummy_images(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[str] = dict(preprocessor(images=_UpperCamelCase ,return_tensors=_UpperCamelCase ) )
return inputs | 352 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__A : int = logging.getLogger()
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
snake_case_ : int = parser.parse_args()
return args.f
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" )
if os.path.exists(lowerCamelCase_ ):
with open(lowerCamelCase_ , """r""" ) as f:
snake_case_ : str = json.load(lowerCamelCase_ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
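# For example, after a run that wrote {"eval_accuracy": 0.8} to
# <output_dir>/all_results.json, the helper above returns that dict; a
# missing file raises a ValueError rather than returning an empty mapping.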
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
__A : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __UpperCamelCase ( lowercase__ ):
@classmethod
def a__ ( cls :Dict ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" )
write_basic_config(save_location=cls.configPath )
snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def a__ ( cls :int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Optional[int] ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[str] = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
snake_case_ : Dict = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Tuple ):
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model, and it would need drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case_ : Optional[int] = get_results(_UpperCamelCase )
self.assertLess(result["""perplexity"""] ,1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Tuple ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[str] = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
self.assertLess(result["""perplexity"""] ,4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[Any] ):
# with so little data, distributed training needs more epochs to reach a score on par with a 0/1 GPU run
snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : str = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Optional[int] = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertLess(result["""train_loss"""] ,0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[str] ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : Optional[int] = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 )
self.assertGreaterEqual(result["""eval_exact"""] ,2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[Any] ):
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Union[str, Any] = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Union[str, Any] = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :int ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[Any] = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : int = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 )
self.assertGreaterEqual(result["""eval_rouge2"""] ,2 )
self.assertGreaterEqual(result["""eval_rougeL"""] ,7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :int ):
snake_case_ : Tuple = self.get_auto_remove_tmp_dir()
snake_case_ : Optional[Any] = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Any = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) )
@slow
def a__ ( self :Optional[Any] ):
snake_case_ : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
snake_case_ : Dict = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Any ):
snake_case_ : Dict = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
# The base model scores 25%
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) ) | 8 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
A =re.compile(r'\s+')
def snake_case_ (_a : List[Any] ):
return {"hash": hashlib.mda(re.sub(_a , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def snake_case_ (_a : Tuple ):
UpperCAmelCase = [len(_a ) for line in example['''content'''].splitlines()]
return {"line_mean": np.mean(_a ), "line_max": max(_a )}
def snake_case_ (_a : Optional[int] ):
UpperCAmelCase = np.mean([c.isalnum() for c in example['''content''']] )
return {"alpha_frac": alpha_frac}
def snake_case_ (_a : Any , _a : str ):
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def snake_case_ (_a : int , _a : List[str]=5 ):
UpperCAmelCase = ['''auto-generated''', '''autogenerated''', '''automatically generated''']
UpperCAmelCase = example['''content'''].splitlines()
for _, line in zip(range(_a ) , _a ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def snake_case_ (_a : Dict , _a : int=5 , _a : Union[str, Any]=0.05 ):
UpperCAmelCase = ['''unit tests''', '''test file''', '''configuration file''']
UpperCAmelCase = example['''content'''].splitlines()
UpperCAmelCase = 0
UpperCAmelCase = 0
# first test
for _, line in zip(range(_a ) , _a ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
UpperCAmelCase = example['''content'''].count('''\n''' )
UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def snake_case_ (_a : List[Any] ):
UpperCAmelCase = ['''def ''', '''class ''', '''for ''', '''while ''']
UpperCAmelCase = example['''content'''].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def snake_case_ (_a : Optional[Any] , _a : Any=4 ):
UpperCAmelCase = example['''content'''].splitlines()
UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('''=''' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def snake_case_ (_a : List[str] ):
UpperCAmelCase = tokenizer(example['''content'''] , truncation=_a )['''input_ids''']
UpperCAmelCase = len(example['''content'''] ) / len(_a )
return {"ratio": ratio}
def snake_case_ (_a : str ):
UpperCAmelCase = {}
results.update(get_hash(_a ) )
results.update(line_stats(_a ) )
results.update(alpha_stats(_a ) )
results.update(char_token_ratio(_a ) )
results.update(is_autogenerated(_a ) )
results.update(is_config_or_test(_a ) )
results.update(has_no_keywords(_a ) )
results.update(has_few_assignments(_a ) )
return results
def snake_case_ (_a : Dict , _a : Dict , _a : Dict ):
if not check_uniques(_a , _a ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def snake_case_ (_a : Optional[Any] ):
with open(_a , '''rb''' ) as f_in:
with gzip.open(str(_a ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_a , _a )
os.unlink(_a )
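# Worked summary of the heuristics above (a gloss, not additional pipeline
# steps): a file whose first five lines mention "automatically generated" is
# dropped by the autogeneration check; a file containing none of "def ",
# "class ", "for " or "while " is kept only with probability 1 - filter_proba;
# and a characters-per-token ratio below min_token_ratio rejects the example,
# which is intended to weed out content the tokenizer compresses poorly.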
# Settings
A =HfArgumentParser(PreprocessingArguments)
A =parser.parse_args()
if args.num_workers is None:
A =multiprocessing.cpu_count()
A =AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
A =time.time()
A =load_dataset(args.dataset_name, split='train')
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
A =time.time()
A =ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
A =set(ds.unique('hash'))
A =len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
A =time.time()
A =ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
A =time.time()
A , A =deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
A =Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
A =output_dir / 'data'
data_dir.mkdir(exist_ok=True)
A =time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
A =str(data_dir / f"""file-{file_number+1:012}.json""")
A =min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 34 |
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(UpperCAmelCase )]
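# Quick sanity check of the closed form n * (2n - 1): the first five
# hexagonal numbers are 0, 1, 6, 15 and 28.
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]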
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10)) | 243 | 0 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCamelCase_ ( a_ ):
def __get__( self , snake_case__ , snake_case__=None ) -> Any:
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
UpperCAmelCase = """__cached_""" + self.fget.__name__
UpperCAmelCase = getattr(snake_case__ , snake_case__ , snake_case__ )
if cached is None:
UpperCAmelCase = self.fget(snake_case__ )
setattr(snake_case__ , snake_case__ , snake_case__ )
return cached
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'''invalid truth value {val!r}''' )
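# Worked examples for the truthiness parser above: "y", "TRUE" and "1" all
# map to 1, "n", "Off" and "0" all map to 0, and anything else (e.g. "maybe")
# raises a ValueError.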
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if is_torch_fx_proxy(lowerCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(lowerCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCAmelCase , np.ndarray )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return isinstance(lowerCAmelCase , np.ndarray )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return _is_numpy(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
import torch
return isinstance(lowerCAmelCase , torch.Tensor )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
import torch
return isinstance(lowerCAmelCase , torch.device )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
import torch
if isinstance(lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
else:
return False
return isinstance(lowerCAmelCase , torch.dtype )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
import tensorflow as tf
return isinstance(lowerCAmelCase , tf.Tensor )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCAmelCase , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(lowerCAmelCase )
return type(lowerCAmelCase ) == tf.Tensor
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCAmelCase , jnp.ndarray )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(lowerCAmelCase ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase , (list, tuple) ):
return [to_py_obj(lowerCAmelCase ) for o in obj]
elif is_tf_tensor(lowerCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCAmelCase ):
return np.asarray(lowerCAmelCase ).tolist()
elif isinstance(lowerCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (dict, UserDict) ):
return {k: to_numpy(lowerCAmelCase ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase , (list, tuple) ):
return np.array(lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
return obj.numpy()
elif is_torch_tensor(lowerCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCAmelCase ):
return np.asarray(lowerCAmelCase )
else:
return obj
class UpperCamelCase_ ( a_ ):
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = fields(self )
# Safety and consistency checks
if not len(snake_case__ ):
raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase = getattr(self , class_fields[0].name )
UpperCAmelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(snake_case__ ):
if isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase = first_field.items()
UpperCAmelCase = True
else:
try:
UpperCAmelCase = iter(snake_case__ )
UpperCAmelCase = True
except TypeError:
UpperCAmelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(snake_case__ ):
if (
not isinstance(snake_case__ , (list, tuple) )
or not len(snake_case__ ) == 2
or not isinstance(element[0] , snake_case__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
UpperCAmelCase = element[1]
elif first_field is not None:
UpperCAmelCase = first_field
else:
for field in class_fields:
UpperCAmelCase = getattr(self , field.name )
if v is not None:
UpperCAmelCase = v
def __delitem__( self , *snake_case__ , **snake_case__ ) -> List[str]:
"""simple docstring"""
raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def UpperCamelCase_ ( self , *snake_case__ , **snake_case__ ) -> List[str]:
"""simple docstring"""
raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def UpperCamelCase_ ( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
"""simple docstring"""
raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def UpperCamelCase_ ( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self , snake_case__ ) -> List[str]:
"""simple docstring"""
if isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(snake_case__ , snake_case__ )
super().__setattr__(snake_case__ , snake_case__ )
def __setitem__( self , snake_case__ , snake_case__ ) -> Optional[int]:
"""simple docstring"""
super().__setitem__(snake_case__ , snake_case__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(snake_case__ , snake_case__ )
def UpperCamelCase_ ( self ) -> Tuple[Any]:
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
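# Behavioural note on the container above: it acts as both a dataclass and an
# ordered mapping, so for an output with fields (loss, logits) both set, one
# can read output.logits, output["logits"] or output[1] interchangeably,
# while fields left as None are skipped by keys() iteration and by to_tuple().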
class UpperCamelCase_ ( a_ , a_ ):
@classmethod
def UpperCamelCase_ ( cls , snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
raise ValueError(
f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class UpperCamelCase_ ( a_ ):
_A : Optional[Any] = 'longest'
_A : str = 'max_length'
_A : Union[str, Any] = 'do_not_pad'
class UpperCamelCase_ ( a_ ):
_A : Union[str, Any] = 'pt'
_A : Union[str, Any] = 'tf'
_A : int = 'np'
_A : Optional[int] = 'jax'
class UpperCamelCase_ :
def __init__( self , snake_case__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = context_managers
UpperCAmelCase = ExitStack()
def __enter__( self ) -> Optional[int]:
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(snake_case__ )
def __exit__( self , *snake_case__ , **snake_case__ ) -> str:
"""simple docstring"""
self.stack.__exit__(*snake_case__ , **snake_case__ )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = infer_framework(lowerCAmelCase )
if framework == "tf":
UpperCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = model_class.__name__
UpperCAmelCase = infer_framework(lowerCAmelCase )
if framework == "tf":
UpperCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase = "" , lowerCAmelCase = "." ):
'''simple docstring'''
def _flatten_dict(lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase="." ):
for k, v in d.items():
UpperCAmelCase = str(lowerCAmelCase ) + delimiter + str(lowerCAmelCase ) if parent_key else k
if v and isinstance(lowerCAmelCase , lowerCAmelCase ):
yield from flatten_dict(lowerCAmelCase , lowerCAmelCase , delimiter=lowerCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) )
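# Worked example for the flattening helper above: {"a": 1, "b": {"c": 2}}
# with the default "." delimiter becomes {"a": 1, "b.c": 2}, with nested keys
# joined as parent_key + delimiter + child_key at every level.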
@contextmanager
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=None ):
'''simple docstring'''
if is_numpy_array(lowerCAmelCase ):
return np.transpose(lowerCAmelCase , axes=lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.T if axes is None else array.permute(*lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase , perm=lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return jnp.transpose(lowerCAmelCase , axes=lowerCAmelCase )
else:
raise ValueError(F'''Type not supported for transpose: {type(lowerCAmelCase )}.''' )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
if is_numpy_array(lowerCAmelCase ):
return np.reshape(lowerCAmelCase , lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.reshape(*lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.reshape(lowerCAmelCase , lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return jnp.reshape(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(F'''Type not supported for reshape: {type(lowerCAmelCase )}.''' )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=None ):
'''simple docstring'''
if is_numpy_array(lowerCAmelCase ):
return np.squeeze(lowerCAmelCase , axis=lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.squeeze(lowerCAmelCase , axis=lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return jnp.squeeze(lowerCAmelCase , axis=lowerCAmelCase )
else:
raise ValueError(F'''Type not supported for squeeze: {type(lowerCAmelCase )}.''' )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
if is_numpy_array(lowerCAmelCase ):
return np.expand_dims(lowerCAmelCase , lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.unsqueeze(dim=lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.expand_dims(lowerCAmelCase , axis=lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return jnp.expand_dims(lowerCAmelCase , axis=lowerCAmelCase )
else:
raise ValueError(F'''Type not supported for expand_dims: {type(lowerCAmelCase )}.''' )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if is_numpy_array(lowerCAmelCase ):
return np.size(lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.numel()
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.size(lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return array.size
else:
raise ValueError(F'''Type not supported for tensor size: {type(lowerCAmelCase )}.''' )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(lowerCAmelCase , (tuple, list) ):
UpperCAmelCase = [F'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase = F'''{repo_id}--{value}'''
return auto_map
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
for base_class in inspect.getmro(lowerCAmelCase ):
UpperCAmelCase = base_class.__module__
UpperCAmelCase = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'''Could not infer framework from class {model_class}.''' )
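# For example, a model class whose MRO passes through the torch-based
# PreTrainedModel resolves to "pt", a keras/TFPreTrainedModel subclass
# resolves to "tf", and a flax/jax FlaxPreTrainedModel subclass resolves to
# "flax"; anything else raises the TypeError above.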
| 248 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase_ ( a_ ):
def __init__( self , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
UpperCAmelCase = eval_examples
UpperCAmelCase = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix = "eval" ) -> dict:
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'''{metric_key_prefix}_''' ):
                    metrics[f'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" ) -> PredictionOutput:
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'''{metric_key_prefix}_''' ):
                metrics[f'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
| 248 | 1 |
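The metric-prefixing step shared by `evaluate` and `predict` above is easy to get wrong (keys must gain the prefix exactly once); a minimal standalone sketch of just that loop:

def prefix_metrics(metrics: dict, metric_key_prefix: str = "eval") -> dict:
    # Mirrors the key-prefixing loop above: already-prefixed keys are left alone.
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
    return metrics

assert prefix_metrics({"f1": 0.9, "eval_loss": 0.1}) == {"eval_f1": 0.9, "eval_loss": 0.1}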
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter( datasets.Metric ):
    '''simple docstring'''
    def _info( self ) -> datasets.MetricInfo:
"""simple docstring"""
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Sequence(datasets.Value("string" , id="sequence") , id="references"),
}) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute( self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ) -> dict:
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length} | 269 |
'''simple docstring'''
import random
def random_graph ( vertices_number : int , probability : float , directed : bool = False ) -> dict:
    graph : dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the number randomly generated is lower than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number : int ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod() | 190 | 0 |
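A hedged usage sketch, assuming the two functions above (named `random_graph` and `complete_graph` here) are in scope; seeding makes the Bernoulli edge sampling reproducible:

import random

random.seed(0)
g = random_graph(4, 0.5)
assert set(g) == {0, 1, 2, 3}  # every vertex gets an adjacency list
assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}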
"""simple docstring"""
from __future__ import annotations
def slowsort ( sequence : list , start : int | None = None , end : int | None = None ) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 371 |
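Slowsort is the deliberately pessimal "multiply and surrender" algorithm: it recursively sorts both halves, places the maximum at the end, then re-sorts everything but that maximum. It still sorts correctly, in place:

seq = [5, 2, 4, 1, 3]
slowsort(seq)  # sorts in place; returns None
assert seq == [1, 2, 3, 4, 5]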
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 112 | 0 |
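The module above is a pure backward-compatibility shim: every symbol is re-imported from its new `utils` home so old `from ...file_utils import X` statements keep working after the refactor. A self-contained demonstration of the pattern, with hypothetical module names built in memory:

import sys
import types

# Stand-in for the "new" module a shim would re-export from.
new_mod = types.ModuleType("new_location")
new_mod.helper = lambda: 42
sys.modules["new_location"] = new_mod

# What the shim file itself would contain: a re-import from the new home.
shim = types.ModuleType("old_location")
from new_location import helper
shim.helper = helper
sys.modules["old_location"] = shim

from old_location import helper as old_helper
assert old_helper() == 42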
def solution ( n : int = 1000 ) -> int:
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 259 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
__snake_case = """__DUMMY_TRANSFORMERS_USER__"""
__snake_case = """Dummy User"""
__snake_case = """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"""
__snake_case = """https://hub-ci.huggingface.co"""
__snake_case = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
__snake_case = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
__snake_case = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _A ( SCREAMING_SNAKE_CASE__ : Any ):
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , SCREAMING_SNAKE_CASE__ )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _A ( SCREAMING_SNAKE_CASE__ : List[str] ):
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ):
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def _A ( ):
return HfApi(endpoint=SCREAMING_SNAKE_CASE__ )
@pytest.fixture(scope='''session''' )
def _A ( SCREAMING_SNAKE_CASE__ : HfApi ):
UpperCamelCase :Tuple = HfFolder.get_token()
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _A ( SCREAMING_SNAKE_CASE__ : Dict ):
def _cleanup_repo(SCREAMING_SNAKE_CASE__ : Tuple ):
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
@contextmanager
def _temporary_repo(SCREAMING_SNAKE_CASE__ : Any ):
try:
yield repo_id
finally:
cleanup_repo(SCREAMING_SNAKE_CASE__ )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def _A ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
UpperCamelCase :Union[str, Any] = F'''repo_txt_data-{int(time.time() * 1_0e3 )}'''
UpperCamelCase :int = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo='''data/text_data.txt''' , repo_id=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def _A ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ):
UpperCamelCase :Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 1_0e3 )}'''
UpperCamelCase :Any = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo='''data.zip''' , repo_id=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def _A ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
UpperCamelCase :Dict = F'''repo_zipped_img_data-{int(time.time() * 1_0e3 )}'''
UpperCamelCase :Dict = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo='''data.zip''' , repo_id=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
return hf_private_dataset_repo_zipped_img_data_
| 259 | 1 |
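`temporary_repo` above is an instance of a generic create/yield/destroy pattern; a minimal self-contained sketch of the same shape:

from contextlib import contextmanager

@contextmanager
def temporary_resource(create, destroy):
    # Generic version of the try/finally shape used by _temporary_repo above.
    handle = create()
    try:
        yield handle
    finally:
        destroy(handle)

log = []
with temporary_resource(lambda: "repo-123", lambda h: log.append(f"deleted {h}")) as repo:
    assert repo == "repo-123"
assert log == ["deleted repo-123"]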
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments( BenchmarkArguments ):
    deprecated_args = [
        """no_inference""",
        """no_cuda""",
        """no_tpu""",
        """no_speed""",
        """no_memory""",
        """no_env_print""",
        """no_multi_process""",
    ]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript: bool = field(default=False , metadata={"""help""": """Trace the models using torchscript"""} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
    fp16_opt_level: str = field(
        default="""O1""" , metadata={
            """help""": (
                """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
                """See details at https://nvidia.github.io/apex/amp.html"""
            )
        } , )
    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self ):
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self ):
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]
    @property
    def n_gpu( self ):
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]
    @property
    def is_gpu( self ):
        return self.n_gpu > 0
| 98 |
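The legacy-flag translation in `__init__` above flips a `no_*` option into its positive counterpart; reduced to its core:

kwargs = {"no_cuda": True}
deprecated_arg = "no_cuda"
positive_arg = deprecated_arg[3:]  # "cuda"
value = not kwargs.pop(deprecated_arg)  # no_cuda=True becomes cuda=False
assert (positive_arg, value) == ("cuda", False)
assert kwargs == {}  # the deprecated key is consumed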
'''simple docstring'''
from random import randint, random
def construct_highway ( number_of_cells , frequency , initial_speed , random_frequency = False , random_speed = False , max_speed = 5 , ) -> list:
    '''simple docstring'''
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance ( highway_now , car_index ) -> int:
    '''simple docstring'''
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update ( highway_now , probability , max_speed ) -> list:
    '''simple docstring'''
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate ( highway , number_of_update , probability , max_speed ) -> list:
    '''simple docstring'''
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
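A tiny deterministic run of this Nagel-Schreckenberg update (probability 0 disables random braking), assuming the function names as fixed above:

highway = construct_highway(number_of_cells=10, frequency=3, initial_speed=1)
history = simulate(highway, number_of_update=2, probability=0.0, max_speed=5)
assert len(history) == 3  # initial state plus two update steps
assert all(len(row) == 10 for row in history)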
"""simple docstring"""
import math
def solution ( n : int = 1_0_0 ) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
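The same answer follows from the closed forms Σi = n(n+1)/2 and Σi² = n(n+1)(2n+1)/6, which avoids both loops:

n = 100
square_of_sum = (n * (n + 1) // 2) ** 2  # 5050**2 = 25502500
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6  # 338350
assert square_of_sum - sum_of_squares == 25164150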
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["""_TRAINERSPEC"""]._serialized_start = 45
    _globals["""_TRAINERSPEC"""]._serialized_end = 1581
    _globals["""_TRAINERSPEC_MODELTYPE"""]._serialized_start = 1517
    _globals["""_TRAINERSPEC_MODELTYPE"""]._serialized_end = 1570
    _globals["""_NORMALIZERSPEC"""]._serialized_start = 1584
    _globals["""_NORMALIZERSPEC"""]._serialized_end = 1793
    _globals["""_SELFTESTDATA"""]._serialized_start = 1795
    _globals["""_SELFTESTDATA"""]._serialized_end = 1916
    _globals["""_SELFTESTDATA_SAMPLE"""]._serialized_start = 1864
    _globals["""_SELFTESTDATA_SAMPLE"""]._serialized_end = 1905
    _globals["""_MODELPROTO"""]._serialized_start = 1919
    _globals["""_MODELPROTO"""]._serialized_end = 2429
    _globals["""_MODELPROTO_SENTENCEPIECE"""]._serialized_start = 2208
    _globals["""_MODELPROTO_SENTENCEPIECE"""]._serialized_end = 2418
    _globals["""_MODELPROTO_SENTENCEPIECE_TYPE"""]._serialized_start = 2323
    _globals["""_MODELPROTO_SENTENCEPIECE_TYPE"""]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 128 | 0 |
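Hypothetical usage of the generated bindings, assuming the module is importable as `sentencepiece_model_pb2`; the generated classes follow the standard protobuf message API:

import sentencepiece_model_pb2 as sp_pb2  # hypothetical import path

model = sp_pb2.ModelProto()
piece = model.pieces.add()
piece.piece = "<unk>"
piece.score = 0.0
data = model.SerializeToString()
assert sp_pb2.ModelProto.FromString(data).pieces[0].piece == "<unk>"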
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _a ( unittest.TestCase ):
    def test_swish( self ) -> None:
        '''simple docstring'''
        act = get_activation('''swish''' )
        self.assertIsInstance(act, nn.SiLU )
self.assertEqual(act(torch.tensor(-1_0_0, dtype=torch.floataa ) ).item(), 0 )
self.assertNotEqual(act(torch.tensor(-1, dtype=torch.floataa ) ).item(), 0 )
self.assertEqual(act(torch.tensor(0, dtype=torch.floataa ) ).item(), 0 )
self.assertEqual(act(torch.tensor(2_0, dtype=torch.floataa ) ).item(), 2_0 )
    def test_silu( self ) -> None:
        '''simple docstring'''
        act = get_activation('''silu''' )
        self.assertIsInstance(act, nn.SiLU )
self.assertEqual(act(torch.tensor(-1_0_0, dtype=torch.floataa ) ).item(), 0 )
self.assertNotEqual(act(torch.tensor(-1, dtype=torch.floataa ) ).item(), 0 )
self.assertEqual(act(torch.tensor(0, dtype=torch.floataa ) ).item(), 0 )
self.assertEqual(act(torch.tensor(2_0, dtype=torch.floataa ) ).item(), 2_0 )
    def test_mish( self ) -> None:
        '''simple docstring'''
        act = get_activation('''mish''' )
        self.assertIsInstance(act, nn.Mish )
self.assertEqual(act(torch.tensor(-2_0_0, dtype=torch.floataa ) ).item(), 0 )
self.assertNotEqual(act(torch.tensor(-1, dtype=torch.floataa ) ).item(), 0 )
self.assertEqual(act(torch.tensor(0, dtype=torch.floataa ) ).item(), 0 )
self.assertEqual(act(torch.tensor(2_0, dtype=torch.floataa ) ).item(), 2_0 )
    def test_gelu( self ) -> None:
        '''simple docstring'''
        act = get_activation('''gelu''' )
        self.assertIsInstance(act, nn.GELU )
self.assertEqual(act(torch.tensor(-1_0_0, dtype=torch.floataa ) ).item(), 0 )
self.assertNotEqual(act(torch.tensor(-1, dtype=torch.floataa ) ).item(), 0 )
self.assertEqual(act(torch.tensor(0, dtype=torch.floataa ) ).item(), 0 )
self.assertEqual(act(torch.tensor(2_0, dtype=torch.floataa ) ).item(), 2_0 )
| 369 |
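The numerical expectations in these tests follow from SiLU(x) = x·σ(x): it underflows to exactly 0 for very negative inputs and is numerically the identity for large positive ones (in float32). A quick check:

import torch
from torch import nn

act = nn.SiLU()
x = torch.tensor([-100.0, 0.0, 20.0])
assert torch.allclose(act(x), x * torch.sigmoid(x))
assert act(x)[0].item() == 0.0 and act(x)[2].item() == 20.0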
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig( PretrainedConfig ):
    model_type = '''masked_bert'''
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 128 | 0 |
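A hedged round-trip of the config defined above (standard `PretrainedConfig` behavior):

config = MaskedBertConfig(pruning_method="topK", mask_scale=0.5)
assert config.pruning_method == "topK"
assert config.to_dict()["mask_scale"] == 0.5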
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        """simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 42 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache ( Generic[T] ):
    """simple docstring"""
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n: int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x: T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ) -> None:
        for k in self.dq_store:
            print(k )
    def __repr__( self ) -> str:
        return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 106 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 199 |
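The `_LazyModule` trick defers heavy imports until an attribute is actually accessed, keeping `import transformers`-style imports cheap; a minimal self-contained sketch of the mechanism:

import importlib
import types

class LazyModule(types.ModuleType):
    # Minimal sketch: resolve attributes through _import_structure on demand.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")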
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 199 | 1 |
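The same deprecation pattern reduced to its essentials: the old class name stays importable, warns on construction, and otherwise behaves exactly like its replacement:

import warnings

class NewProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale

class OldProcessor(NewProcessor):  # deprecated alias kept for backward compatibility
    def __init__(self, *args, **kwargs):
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    p = OldProcessor(scale=2.0)
assert p.scale == 2.0 and caught[0].category is FutureWarning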