import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
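    # For reference: XLNet appends its special tokens rather than prepending
    # them BERT-style, so a single segment is encoded as ``A <sep> <cls>`` and
    # a pair as ``A <sep> B <sep> <cls>``; in this vocabulary id 4 is ``<sep>``
    # and id 3 is ``<cls>``, which is exactly what the two asserts above check.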
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a :Dict = {'''input_ids''': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a,  # the expected-encoding dict built above
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested size up to a multiple of ``scale_factor**2``, expressed in latent pixels."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
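# Worked example (illustrative): with the default scale_factor of 8, a
# requested 768x768 image maps to a 96x96 latent grid: 768 // 8**2 = 12
# blocks per side, re-expanded to 12 * 8 = 96 latent pixels, which the MoVQ
# decoder later upsamples by the same factor of 8 back to 768x768.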
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
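    # Note on the ``init_noise_sigma`` scaling above: it is the scheduler's
    # expected standard deviation for the initial noise (1.0 for DDPM, so the
    # multiplication is a no-op there, but larger for sigma-based schedulers).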
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                # classifier-free guidance: push the prediction away from the
                # unconditional branch and toward the image-conditioned one
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
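# Usage sketch (illustrative; the checkpoint name is the one from the example
# docstring above): the offload helpers defined in this pipeline trade speed
# for GPU memory:
#
#   pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#   pipe.enable_model_cpu_offload()  # keep only the active sub-model on the GPU
#   image = pipe(image_embeds=image_emb, negative_image_embeds=zero_image_emb).images[0]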
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
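# Usage note (illustrative): thanks to the _LazyModule indirection above, an
# import such as ``from transformers import SpeechT5Processor`` only loads the
# ``processing_speecht5`` submodule on first attribute access, so importing the
# top-level package stays cheap even when optional backends are not installed.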
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
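# Illustrative sketch (the subclass below is hypothetical, showing how such a
# mixin is typically consumed): a concrete test binds a block class and a
# block_type, then inherits the shared output and training tests:
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D  # e.g. from diffusers.models
#       block_type = "down"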
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
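# Worked example (illustrative): exchange_sort([3, 1, 4, 1, 5]) returns
# [1, 1, 3, 4, 5]. The nested loops perform O(n^2) comparisons, comparable to
# selection sort, and the input list is mutated in place.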
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0, vector)
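# Worked example (illustrative): relu([-3.0, 0.0, 2.5]) returns
# array([0. , 0. , 2.5]); np.maximum broadcasts, so plain Python lists work too.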
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is kept in asdict output even when it equals the default
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
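# Usage sketch (illustrative): instantiating the template records how dataset
# columns map onto the canonical schema, e.g.
#   Summarization(text_column="article", summary_column="highlights").column_mapping
# evaluates to {"article": "text", "highlights": "summary"}.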
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including ``num`` via the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
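# Worked example (illustrative): prime_sieve(10) returns [2, 3, 5, 7]; the
# sieve costs O(n log log n) operations, far cheaper than per-number trial division.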
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    """Round the requested size up to a multiple of ``scale_factor**2``, expressed in latent pixels."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
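# Worked example (illustrative): get_new_h_w(512, 512) with the default scale
# factor of 8 gives (64, 64): 512 // 8**2 = 8 latent blocks per side, then
# 8 * 8 = 64 latent pixels, which the MoVQ decoder expands back to 512x512.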
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
def UpperCamelCase__ (self , __a , __a , __a , __a , __a=None , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = len(__a ) if isinstance(__a , __a ) else 1
# get prompt text embeddings
UpperCAmelCase__ = self.tokenizer(
__a , padding='max_length' , truncation=__a , max_length=77 , return_attention_mask=__a , add_special_tokens=__a , return_tensors='pt' , )
UpperCAmelCase__ = text_inputs.input_ids
UpperCAmelCase__ = self.tokenizer(__a , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__a , __a ):
UpperCAmelCase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase__ = text_input_ids.to(__a )
UpperCAmelCase__ = text_inputs.attention_mask.to(__a )
UpperCAmelCase__ , UpperCAmelCase__ = self.text_encoder(
input_ids=__a , attention_mask=__a )
UpperCAmelCase__ = prompt_embeds.repeat_interleave(__a , dim=0 )
UpperCAmelCase__ = text_encoder_hidden_states.repeat_interleave(__a , dim=0 )
UpperCAmelCase__ = text_mask.repeat_interleave(__a , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ = 42
if negative_prompt is None:
UpperCAmelCase__ = [''] * batch_size
elif type(__a ) is not type(__a ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !="
F" {type(__a )}." )
elif isinstance(__a , __a ):
UpperCAmelCase__ = [negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
UpperCAmelCase__ = negative_prompt
UpperCAmelCase__ = self.tokenizer(
__a , padding='max_length' , max_length=77 , truncation=__a , return_attention_mask=__a , add_special_tokens=__a , return_tensors='pt' , )
UpperCAmelCase__ = uncond_input.input_ids.to(__a )
UpperCAmelCase__ = uncond_input.attention_mask.to(__a )
UpperCAmelCase__ , UpperCAmelCase__ = self.text_encoder(
input_ids=__a , attention_mask=__a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ = negative_prompt_embeds.shape[1]
UpperCAmelCase__ = negative_prompt_embeds.repeat(1 , __a )
UpperCAmelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __a )
UpperCAmelCase__ = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase__ = uncond_text_encoder_hidden_states.repeat(1 , __a , 1 )
UpperCAmelCase__ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , __a , -1 )
UpperCAmelCase__ = uncond_text_mask.repeat_interleave(__a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase__ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(self , __a , __a , __a , __a = None , __a = 512 , __a = 512 , __a = 100 , __a = 4.0 , __a = 1 , __a = None , __a = None , __a = "pil" , __a = True , ) -> Tuple:
"""simple docstring"""
if isinstance(__a , __a ):
UpperCAmelCase__ = 1
elif isinstance(__a , __a ):
UpperCAmelCase__ = len(__a )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__a )}" )
UpperCAmelCase__ = self._execution_device
UpperCAmelCase__ = batch_size * num_images_per_prompt
UpperCAmelCase__ = guidance_scale > 1.0
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._encode_prompt(
__a , __a , __a , __a , __a )
if isinstance(__a , __a ):
UpperCAmelCase__ = torch.cat(__a , dim=0 )
if isinstance(__a , __a ):
UpperCAmelCase__ = torch.cat(__a , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ = image_embeds.repeat_interleave(__a , dim=0 )
UpperCAmelCase__ = negative_image_embeds.repeat_interleave(__a , dim=0 )
UpperCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=__a )
self.scheduler.set_timesteps(__a , device=__a )
UpperCAmelCase__ = self.scheduler.timesteps
UpperCAmelCase__ = self.unet.config.in_channels
UpperCAmelCase__ , UpperCAmelCase__ = get_new_h_w(__a , __a , self.movq_scale_factor )
# create initial latent
UpperCAmelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , __a , __a , __a , self.scheduler , )
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
UpperCAmelCase__ = self.unet(
sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__ = variance_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(
__a , __a , __a , generator=__a , ).prev_sample
# post-processing
UpperCAmelCase__ = self.movq.decode(__a , force_not_quantize=__a )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase__ = image * 0.5 + 0.5
UpperCAmelCase__ = image.clamp(0 , 1 )
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sgugger/tiny-distilbert-classification'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(__a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(__a , 'env.csv' ) , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'env.csv' ) ).exists() )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , 'sequential' ) )
self.assertTrue(hasattr(__a , 'cumulative' ) )
self.assertTrue(hasattr(__a , 'current' ) )
self.assertTrue(hasattr(__a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , 'log.txt' ) , log_print=__a , trace_memory_line_by_line=__a , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__a , 'log.txt' ) ).exists() )
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1  # make sure generation runs for the full max_new_tokens

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Must be tested with an actual model -- the dummy models' tokenizers are not aligned with
        # their models, so `skip_special_tokens=True` would have no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will time out after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):  # `Empty` comes from `queue` at the top of the original test module
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
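# The minimal TextIteratorStreamer pattern the tests above exercise (sketch; assumes a loaded
# `model`/`tokenizer` pair, with generation on a background thread while the main thread
# consumes the decoded text):
#
#     streamer = TextIteratorStreamer(tokenizer)
#     Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
#     text = "".join(streamer)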
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True)

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_rw_cache_conversion(self):
        # (test name reconstructed) Exercises the RW <-> standard cache converters.
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)

        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)

        for layer in range(len(result.past_key_values)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than query heads, so the head counts
        # here differ from the generic check.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        expected_output = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, expected_output)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
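# Note on the cache checks above: the RW cache layout fuses the batch and head dimensions into a
# single leading dimension (3-D tensors), while the standard format keeps them separate
# (4-D tensors) -- hence the ndim == 3 / ndim == 4 assertions and the element-wise equality check.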
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
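# Hedged usage sketch (not part of this module): a round trip through the fast tokenizer.
#
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     ids = tokenizer("今天天气非常好。")["input_ids"]
#     text = tokenizer.decode(ids)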
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
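# With the lazy module installed, submodules are imported only on first attribute access, e.g.
# (sketch): `from transformers.models.conditional_detr import ConditionalDetrConfig`.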
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
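# All four offline tests rely on the same mechanism: patching `datasets.config.HF_DATASETS_OFFLINE`
# to True makes each network helper raise `OfflineModeIsEnabled` before any connection is attempted.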
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
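# Hedged usage sketch (not in the original file): build a smaller config for quick experiments.
#
#     config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4)
#     assert config.hidden_size == 256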
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
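# Hedged usage sketch (not part of this module): pair a feature extractor with a tokenizer, then
# process audio and its transcription in one call.
#
#     processor = MCTCTProcessor(feature_extractor, tokenizer)
#     batch = processor(audio=waveform, sampling_rate=16_000, text="hello world")
#     # "input_features" come from the feature extractor, "labels" from the tokenizer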
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """
    Checks whether some subset of `arr` sums to `required_sum`, using bottom-up dynamic
    programming over a (len(arr) + 1) x (required_sum + 1) boolean table.
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
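    # Quick sanity check (illustrative values, not from the original file):
    # 2 + 4 + 8 == 14, so a subset summing to 14 exists; no subset here sums to 5.
    assert is_sum_subset([2, 4, 6, 8], 14) is True
    assert is_sum_subset([2, 4, 6, 8], 5) is False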
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as a list of integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one.
    # Base case achieved when either n = 0 or n = 1.
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
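    # Illustrative check (not in the original): the 2-bit Gray code sequence is 00, 01, 11, 10.
    print(gray_code(2))  # -> [0, 1, 3, 2]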
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """
    Logs and saves metrics. `split` is one of train/val/test; `metrics` is a dict of metric values.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
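# Typical invocation (sketch; the model name and paths are placeholders):
#     python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#         --output_dir ./output --do_train --do_eval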
"""simple docstring"""
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 6_4,
'horizon': 3_2,
'num_inference_steps': 2_0,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : int = env.step(denorm_actions)
SCREAMING_SNAKE_CASE_ : List[str] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
f""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
SCREAMING_SNAKE_CASE_ : Optional[Any] = next_observation
except KeyboardInterrupt:
pass
print(f"""Total reward: {total_reward}""")
| 335 |
"""simple docstring"""
class Graph:
    def __init__(self):
        # adjacency list: vertex -> list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        """Print the adjacency list of the graph."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Run a depth-first search over every vertex."""
        # visited list for all vertices (assumes vertices are labeled 0..n-1)
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this vertex
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
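# For contrast, a minimal iterative DFS with an explicit stack (a sketch added for
# illustration; it assumes the same `Graph.vertex` adjacency dict defined above).
def dfs_iterative(graph: Graph, start_vertex: int) -> None:
    visited = set()
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        print(vertex, end=" ")
        # push neighbors in reverse so they are expanded in insertion order
        for neighbor in reversed(graph.vertex.get(vertex, [])):
            if neighbor not in visited:
                stack.append(neighbor)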
| 335 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowerCAmelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
__lowercase : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowercase : Optional[int] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__lowercase : Optional[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__lowercase : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Any = pipeline(
task="""text-classification""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,framework="""pt""" )
lowerCAmelCase__ : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
lowerCAmelCase__ : List[str] = text_classifier("""This is great !""" ,top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}] )
lowerCAmelCase__ : Tuple = text_classifier(["""This is great !""", """This is bad"""] ,top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,[
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] ,)
lowerCAmelCase__ : Optional[int] = text_classifier("""This is great !""" ,top_k=1 )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
# Legacy behavior
lowerCAmelCase__ : Optional[Any] = text_classifier("""This is great !""" ,return_all_scores=__UpperCAmelCase )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
lowerCAmelCase__ : Union[str, Any] = text_classifier("""This is great !""" ,return_all_scores=__UpperCAmelCase )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,[[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}]] )
lowerCAmelCase__ : int = text_classifier(["""This is great !""", """Something else"""] ,return_all_scores=__UpperCAmelCase )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,[
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] ,)
lowerCAmelCase__ : Dict = text_classifier(["""This is great !""", """Something else"""] ,return_all_scores=__UpperCAmelCase )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,[
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
] ,)
@require_torch
def UpperCAmelCase_ ( self ) -> List[Any]:
import torch
lowerCAmelCase__ : Optional[Any] = pipeline(
task="""text-classification""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,framework="""pt""" ,device=torch.device("""cpu""" ) ,)
lowerCAmelCase__ : Optional[Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@require_tf
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : Optional[Any] = pipeline(
task="""text-classification""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,framework="""tf""" )
lowerCAmelCase__ : List[Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@slow
@require_torch
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Tuple = pipeline("""text-classification""" )
lowerCAmelCase__ : Union[str, Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """POSITIVE""", """score""": 1.0}] )
lowerCAmelCase__ : Optional[Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """NEGATIVE""", """score""": 1.0}] )
lowerCAmelCase__ : List[str] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
@slow
@require_tf
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = pipeline("""text-classification""" ,framework="""tf""" )
lowerCAmelCase__ : Any = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """POSITIVE""", """score""": 1.0}] )
lowerCAmelCase__ : Any = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """NEGATIVE""", """score""": 1.0}] )
lowerCAmelCase__ : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Any = TextClassificationPipeline(model=__UpperCAmelCase ,tokenizer=__UpperCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : Tuple = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowerCAmelCase__ : Tuple = """HuggingFace is in"""
lowerCAmelCase__ : Union[str, Any] = text_classifier(__UpperCAmelCase )
self.assertEqual(nested_simplify(__UpperCAmelCase ) ,[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
lowerCAmelCase__ : Tuple = ["""HuggingFace is in """, """Paris is in France"""]
lowerCAmelCase__ : List[str] = text_classifier(__UpperCAmelCase )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}, {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] ,)
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowerCAmelCase__ : int = text_classifier(__UpperCAmelCase ,top_k=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,[[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N, [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N] ,)
lowerCAmelCase__ : List[str] = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
lowerCAmelCase__ : List[Any] = text_classifier(__UpperCAmelCase )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )} ,)
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowerCAmelCase__ : Optional[int] = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(__UpperCAmelCase ):
text_classifier(__UpperCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowerCAmelCase__ : Tuple = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) ,[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] ,)
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
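if __name__ == "__main__":
    # Quick manual check of the behavior pinned down above (a sketch; it uses the
    # same tiny test checkpoint as the tests).
    classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
    print(classifier("This is great !", top_k=2))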
| 184 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing optimization algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
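# The acceptance rule above is the Metropolis criterion: a worsening move is kept
# with probability e**(change / current_temp). Worked example: a move that lowers
# the score by 5 is accepted with probability e**(-5 / 100) ≈ 0.951 at temperature
# 100, but only e**(-5 / 1) ≈ 0.0067 at temperature 1; cooling makes bad moves rarer.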
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 184 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    """Convert a PIL image (or list of PIL images) to a normalized tensor batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
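if __name__ == "__main__":
    # Hypothetical driver for the pipeline above (a sketch: the checkpoint id and
    # subfolder layout are assumptions; substitute any DDIM-compatible pair).
    import PIL.Image
    from diffusers import UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
    scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
    pipe = DDIMNoiseComparativeAnalysisPipeline(unet, scheduler)

    init_image = PIL.Image.open("input.png")  # placeholder path
    images, noising_timestep = pipe(init_image, strength=0.5, num_inference_steps=50, return_dict=False)
    images[0].save("denoised.png")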
| 70 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
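    # Illustration of the renaming above on a fabricated key (not a real CLAP tensor):
    #   rename_state_dict({"text_branch.encoder.layer.0.mlp.fc1.weight": torch.zeros(1)})
    #   -> {"text_model.encoder.layer.0.intermediate.dense.weight": tensor([0.])}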
| 13 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ : Optional[int] = "src/diffusers"
a_ : int = "."
# This is to make sure the diffusers module imported is the one in the repo.
a_ : List[Any] = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
a_ : Tuple = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply the black part of our `make style` command to `code` (as a string)."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
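    # Example of the marker this script enforces (illustrative):
    #   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
    # `_re_copy_warning` captures the indent, the fully qualified object name, and the
    # optional `with Old->New [all-casing]` replacement clause checked above.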
| 104 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Optional[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 86 |
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    """Sum of all positive integers that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
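    # Quick self-check (not in the original): below 12 there are no abundant numbers,
    # so every integer 1..11 is kept and the partial sum is 1 + 2 + ... + 11 = 66.
    assert solution(limit=11) == 66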
| 141 | 0 |
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) times the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
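    # Example: 2 mol of an ideal gas at 300 K in a 10 L vessel.
    # P = nRT / V = (2 * 0.0821 * 300) / 10 = 4.926 atm, rounded to 5 by the helper.
    print(moles_to_pressure(volume=10, moles=2, temperature=300))  # 5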
| 55 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Any=9_9 , UpperCAmelCase__ : Any=3_6 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : int=3_7 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Dict=5_1_2 , UpperCAmelCase__ : Optional[Any]=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : List[str]=6 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[Any]=1_0_0_0 , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = text_seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = coordinate_size
lowerCAmelCase = shape_size
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCAmelCase = text_seq_length
lowerCAmelCase = (image_size // patch_size) ** 2 + 1
lowerCAmelCase = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self : str ) -> Dict:
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = t
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ) -> str:
lowerCAmelCase = LayoutLMvaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# text + image
lowerCAmelCase = model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__ )
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase = model(pixel_values=UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Optional[int]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = LayoutLMvaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = LayoutLMvaForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Optional[Any]:
lowerCAmelCase = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : Tuple ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
lowerCamelCase : int = False
lowerCamelCase : Optional[int] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
lowerCAmelCase = LayoutLMvaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]=False ) -> Optional[int]:
lowerCAmelCase = copy.deepcopy(UpperCAmelCase__ )
if model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in [
*get_values(UpperCAmelCase__ ),
]:
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in [
*get_values(UpperCAmelCase__ ),
]:
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase__ , )
return inputs_dict
def __UpperCAmelCase ( self : Tuple ) -> Any:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Any ) -> Any:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = LayoutLMvaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : int ) -> str:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : int ) -> Any:
lowerCAmelCase = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(UpperCAmelCase__ )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).pixel_values.to(UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([[1, 2]] )
lowerCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCAmelCase = model(
input_ids=input_ids.to(UpperCAmelCase__ ) , bbox=bbox.to(UpperCAmelCase__ ) , pixel_values=pixel_values.to(UpperCAmelCase__ ) , )
 # verify the shape and values of the last hidden states
lowerCAmelCase = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
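        # Note on the asserted shape: assuming the default 224x224 input and
        # 16x16 patches, the image contributes 14 * 14 = 196 patch tokens plus
        # a learned CLS embedding (197 visual tokens); concatenated with the
        # 2 text tokens this gives the sequence length of 199.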
| 55 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase__ : Optional[int] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowercase__ : Union[str, Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowercase__ : List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
 def _info( self )-> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
 def _compute( self , predictions: List[List[str]] , references: List[List[List[str]]] , min_len: int = 1 , max_len: int = 4 )-> Dict[str, float]:
 '''Return the corpus-level Google BLEU score of `predictions` against `references`.'''
 return {
 "google_bleu": gleu_score.corpus_gleu(
 list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
 }
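

# --- Illustrative sketch (added for clarity, not part of the metric above): a
# from-scratch, single-reference sentence GLEU that makes the
# min(precision, recall) definition in _DESCRIPTION concrete. The `_sketch_*`
# names are ours, not part of the datasets library.
from collections import Counter


def _sketch_ngrams(tokens, min_len=1, max_len=4):
    """Multiset of all n-grams of order min_len..max_len of a token list."""
    return Counter(
        tuple(tokens[i : i + n])
        for n in range(min_len, max_len + 1)
        for i in range(len(tokens) - n + 1)
    )


def _sketch_sentence_gleu(hypothesis, reference, min_len=1, max_len=4):
    hyp = _sketch_ngrams(hypothesis, min_len, max_len)
    ref = _sketch_ngrams(reference, min_len, max_len)
    if not hyp or not ref:
        return 0.0
    overlap = sum((hyp & ref).values())  # clipped n-gram matches
    # precision = matches / hypothesis n-grams; recall = matches / reference n-grams
    return min(overlap / sum(hyp.values()), overlap / sum(ref.values()))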
| 328 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders over points of the line y = a * x + b."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the random values drawn per epoch."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
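

# A minimal sketch of the save/load round-trip that the tests below exercise.
# `ckpt_dir` is illustrative; save_state/load_state also capture optimizer,
# scheduler and RNG state, which is what the equality assertions rely on.
def _checkpoint_round_trip_sketch(ckpt_dir: str) -> None:
    accelerator = Accelerator()
    net = torch.nn.Linear(1, 1)
    opt = torch.optim.Adam(net.parameters())
    net, opt = accelerator.prepare(net, opt)
    accelerator.save_state(ckpt_dir)  # writes model, optimizer and RNG state
    accelerator.load_state(ckpt_dir)  # restores the exact same state later
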
class DummyModel(nn.Module):
    """Simple model computing y = a * x + b with learnable a and b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
 # Save 11 states; with total_limit=2 only the two most recent checkpoints survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing"
lowercase__ : List[Any] = DummyModel()
lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase__ , lowercase__ : str = dummy_dataloaders()
lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase__ : int = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowercase__ : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowercase__ : Any = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowercase__ : List[Any] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 328 | 1 |
'''
Convert speeds between km/h, m/s, mph and knot, pivoting on km/h: the first
chart converts a unit to km/h, the second converts km/h to a unit.
'''
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.60_9344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.2_7777_7778,
    "mph": 0.6_2137_1192,
    "knot": 0.5_3995_6803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    '''Convert `speed` from `unit_from` to `unit_to`, rounded to 3 decimals.'''
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
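

# Quick sanity checks (added for illustration; the expected values follow by
# hand from the charts above: 100 * 0.621371192 and 100 * 1.609344):
def _demo_convert_speed() -> None:
    assert convert_speed(100, "km/h", "mph") == 62.137
    assert convert_speed(100, "mph", "km/h") == 160.934
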
if __name__ == "__main__":
import doctest
doctest.testmod() | 367 |
'''Project Euler problem 3: largest prime factor of a number.'''
def solution(n: int = 600851475143) -> int:
    '''Return the largest prime factor of n, found by trial division.'''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
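

# Worked example (helper added for illustration): 13195 = 5 * 7 * 13 * 29, so
# its largest prime factor is 29; 6857 is the answer for the default n.
def _demo_solution() -> None:
    assert solution(13195) == 29
    assert solution(600851475143) == 6857
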
if __name__ == "__main__":
print(f"""{solution() = }""") | 240 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _lowercase :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any=13 , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=True , __lowerCamelCase : str=99 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : int=2 , __lowerCamelCase : Dict=4 , __lowerCamelCase : int=37 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Any=False , __lowerCamelCase : Any=True , __lowerCamelCase : Any="None" , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Any=4 , __lowerCamelCase : int=None , ):
'''simple docstring'''
lowerCamelCase__ : Any = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Tuple = use_input_mask
lowerCamelCase__ : str = use_token_type_ids
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Tuple = num_attention_heads
lowerCamelCase__ : Dict = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : int = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = type_vocab_size
lowerCamelCase__ : List[str] = type_sequence_label_size
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Tuple = num_choices
lowerCamelCase__ : List[Any] = relative_attention
lowerCamelCase__ : Dict = position_biased_input
lowerCamelCase__ : Dict = pos_att_type
lowerCamelCase__ : Tuple = scope
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Dict = None
lowerCamelCase__ : List[str] = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : int = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Any , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDebertaVaModel(config=__lowerCamelCase )
lowerCamelCase__ : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCamelCase__ : List[str] = [input_ids, input_mask]
lowerCamelCase__ : List[str] = model(__lowerCamelCase )
lowerCamelCase__ : str = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDebertaVaForMaskedLM(config=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowerCamelCase__ : Union[str, Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Any = TFDebertaVaForSequenceClassification(config=__lowerCamelCase )
lowerCamelCase__ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowerCamelCase__ : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.num_labels
lowerCamelCase__ : Optional[Any] = TFDebertaVaForTokenClassification(config=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowerCamelCase__ : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TFDebertaVaForQuestionAnswering(config=__lowerCamelCase )
lowerCamelCase__ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowerCamelCase__ : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
A__ = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ = False
A__ = False
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TFDebertaVaModelTester(self )
lowerCamelCase__ : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason="Model not available yet" )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Any = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
lowerCamelCase__ : int = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
lowerCamelCase__ : Tuple = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 )
| 184 |
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string of 0's and 1's
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(
    lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
    """
    Adds the new strings (curr_string + "0", curr_string + "1") to the lexicon
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # Code length grew by one bit; pad every existing code with a leading 0
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """
    Compresses given data_bits using Lempel-Ziv-Welch compression
    and returns the result as a string
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)  # next code number to assign; "0" and "1" are seeded

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """
    Adds the source file's length in front of the compressed string
    (unary-prefixed binary, i.e. Elias gamma coding)
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (consisting only of 0's and 1's) as bytes
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, compresses it and writes the result to destination
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
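

# Tiny illustration (added for clarity): compress_data consumes a bit-string
# such as read_file_binary would produce, here the two bytes of b"hi".
def _demo_compress_data() -> None:
    bits = "0110100001101001"  # 0x68 0x69 == b"hi"
    print(compress_data(bits))
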
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 184 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 91 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 91 | 1 |
'''Compute the inverse of a 2x2 or 3x3 matrix using Decimal arithmetic.'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """
    Returns the inverse of a 2x2 or 3x3 matrix, or raises if it is singular.
    """
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns;
    # this branch handles the 2x2 case, the next one handles 3x3
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix (`or 0.0` normalizes -0.0 to 0.0)
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
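

# Worked 2x2 check (added for illustration): [[2, 5], [2, 0]] has determinant
# 2 * 0 - 2 * 5 = -10, so its inverse is (1 / -10) * [[0, -5], [-2, 2]].
def _demo_inverse_of_matrix() -> None:
    assert inverse_of_matrix([[2.0, 5.0], [2.0, 0.0]]) == [[0.0, 0.5], [0.2, -0.2]]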
| 104 |
'''Project Euler problem 63: Powerful digit counts (https://projecteuler.net/problem=63).'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    '''
    Returns the count of n-digit positive integers that are also an nth power.
    '''
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
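

# Worked check (added for illustration): 16807 = 7**5 is itself a 5-digit
# number, so it is counted; 49 is the known answer for the default arguments.
def _demo_powerful_digit_counts() -> None:
    assert len(str(7**5)) == 5
    assert solution() == 49
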
if __name__ == "__main__":
print(f'{solution(10, 22) = }')
| 104 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[str] = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "unispeech"
def __init__( self, lowerCamelCase__=32, lowerCamelCase__=768, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3072, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=0.02, lowerCamelCase__=1e-5, lowerCamelCase__="group", lowerCamelCase__="gelu", lowerCamelCase__=(512, 512, 512, 512, 512, 512, 512), lowerCamelCase__=(5, 2, 2, 2, 2, 2, 2), lowerCamelCase__=(10, 3, 3, 3, 3, 2, 2), lowerCamelCase__=False, lowerCamelCase__=128, lowerCamelCase__=16, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=0.05, lowerCamelCase__=10, lowerCamelCase__=2, lowerCamelCase__=0.0, lowerCamelCase__=10, lowerCamelCase__=0, lowerCamelCase__=320, lowerCamelCase__=2, lowerCamelCase__=0.1, lowerCamelCase__=100, lowerCamelCase__=256, lowerCamelCase__=256, lowerCamelCase__=0.1, lowerCamelCase__="mean", lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=256, lowerCamelCase__=80, lowerCamelCase__=0, lowerCamelCase__=1, lowerCamelCase__=2, lowerCamelCase__=0.5, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__, pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__ )
A : str = hidden_size
A : Optional[Any] = feat_extract_norm
A : Union[str, Any] = feat_extract_activation
A : Optional[Any] = list(lowerCamelCase__ )
A : str = list(lowerCamelCase__ )
A : List[Any] = list(lowerCamelCase__ )
A : Optional[int] = conv_bias
A : List[Any] = num_conv_pos_embeddings
A : str = num_conv_pos_embedding_groups
A : Union[str, Any] = len(self.conv_dim )
A : Optional[int] = num_hidden_layers
A : Any = intermediate_size
A : Optional[int] = hidden_act
A : str = num_attention_heads
A : Optional[Any] = hidden_dropout
A : int = attention_dropout
A : Any = activation_dropout
A : Optional[Any] = feat_proj_dropout
A : List[str] = final_dropout
A : Optional[Any] = layerdrop
A : str = layer_norm_eps
A : int = initializer_range
A : Tuple = num_ctc_classes
A : Union[str, Any] = vocab_size
A : Optional[int] = do_stable_layer_norm
A : str = use_weighted_layer_sum
A : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A : int = apply_spec_augment
A : Optional[int] = mask_time_prob
A : int = mask_time_length
A : str = mask_time_min_masks
A : List[Any] = mask_feature_prob
A : Tuple = mask_feature_length
A : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
A : Optional[Any] = num_codevectors_per_group
A : Union[str, Any] = num_codevector_groups
A : Any = contrastive_logits_temperature
A : Dict = feat_quantizer_dropout
A : Union[str, Any] = num_negatives
A : List[Any] = codevector_dim
A : Optional[Any] = proj_codevector_dim
A : List[Any] = diversity_loss_weight
# ctc loss
A : List[str] = ctc_loss_reduction
A : List[str] = ctc_zero_infinity
# pretraining loss
A : List[Any] = replace_prob
@property
def _lowerCAmelCase ( self ):
return functools.reduce(operator.mul, self.conv_stride, 1 )
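

# The property above (named `inputs_to_logits_ratio` in the original config)
# multiplies the convolutional strides together: with the default conv_stride
# of (5, 2, 2, 2, 2, 2, 2), one encoder output frame spans 5 * 2**6 = 320 raw
# audio samples. For example:
#
#     >>> import functools, operator
#     >>> functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)
#     320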
| 115 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    """Entry point for the `diffusers-cli` command."""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
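

# Example shell invocation once the package's console script is installed
# (`env` is the subcommand registered above):
#
#     diffusers-cli env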
if __name__ == "__main__":
main()
| 115 | 1 |
'''A hash map implemented with open addressing and linear probing.'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
a_ : Union[str, Any] = TypeVar("""KEY""")
a_ : Tuple = TypeVar("""VAL""")
@dataclass(frozen=lowercase , slots=lowercase )
class snake_case ( Generic[KEY, VAL] ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
class snake_case ( _Item ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__(UpperCamelCase , UpperCamelCase )
def __bool__( self ):
"""simple docstring"""
return False
a_ : Optional[int] = _DeletedItem()
class snake_case ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , UpperCamelCase = 8 , UpperCamelCase = 0.75 ):
"""simple docstring"""
lowerCamelCase_ = initial_block_size
lowerCamelCase_ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase_ = capacity_factor
lowerCamelCase_ = 0
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return hash(UpperCamelCase ) % len(self._buckets )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self._buckets[ind]
if not stored:
lowerCamelCase_ = _Item(UpperCamelCase , UpperCamelCase )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase_ = _Item(UpperCamelCase , UpperCamelCase )
return True
else:
return False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase_ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self._buckets
lowerCamelCase_ = [None] * new_size
lowerCamelCase_ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def snake_case ( self ):
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def snake_case ( self ):
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self._get_bucket_index(UpperCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase_ = self._get_next_ind(UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCamelCase ):
if self._try_set(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
break
def __setitem__( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(UpperCamelCase , UpperCamelCase )
def __delitem__( self , UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCamelCase ):
lowerCamelCase_ = self._buckets[ind]
if item is None:
raise KeyError(UpperCamelCase )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase_ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCamelCase ):
lowerCamelCase_ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(UpperCamelCase )
def __len__( self ):
"""simple docstring"""
return self._len
def __iter__( self ):
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
"""simple docstring"""
lowerCamelCase_ = " ,".join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
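

# Quick illustrative usage (added for clarity, not a formal test):
def _demo_hash_map() -> None:
    hm: HashMap[str, int] = HashMap()
    hm["key"] = 1
    hm["key"] = 2  # overwriting the same key keeps a single entry
    assert len(hm) == 1 and hm["key"] == 2
    del hm["key"]
    assert len(hm) == 0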
| 55 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = BlenderbotSmallTokenizer
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
super().setUp()
lowerCamelCase_ = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
lowerCamelCase_ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
lowerCamelCase_ = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
lowerCamelCase_ = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase ) )
def snake_case ( self , **UpperCamelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = "adapt act apte"
lowerCamelCase_ = "adapt act apte"
return input_text, output_text
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = "adapt act apte"
lowerCamelCase_ = ["adapt", "act", "ap@@", "te"]
lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
lowerCamelCase_ = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1384]
lowerCamelCase_ = "I am a small frog."
lowerCamelCase_ = tok([src_text] , padding=UpperCamelCase , truncation=UpperCamelCase )["input_ids"]
lowerCamelCase_ = tok.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
lowerCamelCase_ = "I am a small frog ."
lowerCamelCase_ = "."
lowerCamelCase_ = tok(UpperCamelCase )["input_ids"]
lowerCamelCase_ = tok(UpperCamelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 55 | 1 |
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs anywhere in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculates the new index we should go to if we fail a comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase_ : Any = """abc1abc12"""
UpperCAmelCase_ : List[Any] = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase_ : Union[str, Any] = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase_ : Optional[Any] = """ABABX"""
UpperCAmelCase_ : List[Any] = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase_ : Tuple = """AAAB"""
UpperCAmelCase_ : Optional[Any] = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase_ : Dict = """abcdabcy"""
UpperCAmelCase_ : Dict = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase_ : Any = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
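
    # Added sanity checks: thanks to the failure array, KMP scans the text once,
    # so total work is O(len(text) + len(pattern)) instead of the naive
    # O(len(text) * len(pattern)).
    assert kmp("ay", "haystack")
    assert not kmp("needle", "haystack")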
| 318 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
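
# Note (added): the fast tests above run the ONNX pipeline on CPUExecutionProvider
# with fixed torch seeds, so the checked image slices are deterministic up to the
# loose 1e-1 tolerance; the nightly tests move to CUDA, where onnxruntime
# reproducibility is weaker (hence the 2e-2 tolerance and the TODOs).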
| 318 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
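
# Note (added): the _LazyModule indirection defers the heavy torch/vision imports
# until an attribute is actually accessed, so `import transformers` stays fast;
# `_import_structure` tells the lazy module which submodule provides each name.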
| 33 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    # NOTE: the class name was not preserved in this sample; "EfficientFormerImageProcessor"
    # is a best guess from the defaults (224x224, bicubic resampling, ImageNet mean/std).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
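
# Illustrative usage (added; the names below are hypothetical, not from the sample):
#
#   from PIL import Image
#   processor = EfficientFormerImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="np")
#   # batch["pixel_values"] has shape (1, 3, 224, 224) after resize + center crop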
| 240 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
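
# Example (added): with the default depths [3, 3, 9, 3], stage_names is
# ["stem", "stage1", "stage2", "stage3", "stage4"]; out_features/out_indices
# select which of these stages a backbone built from this config exposes.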
| 366 |
def rank_of_matrix(matrix: list) -> int:
    """Finds the rank of a matrix by Gaussian elimination (mutates ``matrix``)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below to swap rows with
            reduce_rank = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce_rank = False
                    break
            if reduce_rank:
                # No pivot in this column: move the last column here and shrink the rank
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row and re-examine it with the new contents
            # (a `row -= 1` inside a `for` loop, as in the original, has no effect)
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
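
    # Added example: a 2x3 matrix with two linearly independent rows has rank 2.
    assert rank_of_matrix([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) == 2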
| 328 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # assertEqual here, not assertTrue: assertTrue(x, msg) would always pass for nonzero x
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
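
# Note (added): `all_model_classes[:-1]` in the loops above deliberately skips
# FocalNetBackbone, which returns feature maps rather than a standard model
# output; the backbone is exercised separately via BackboneTesterMixin.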
| 91 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
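
# Note (added): the push-to-hub tests compare every serialized key except
# "transformers_version", which legitimately differs between the environment
# that pushed the config and the one that reloads it.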
| 91 | 1 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum contiguous subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint: scan left from mid, right from mid + 1."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
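
    # Added check: for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray is
    # [4, -1, 2, 1] (indices 3..6) with sum 6, matching Kadane's algorithm.
    assert max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) == (3, 6, 6)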
| 357 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
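
# Note (added): "atom37" is a fixed, residue-independent atom layout (37 possible
# heavy-atom names across all amino acids), while "atom14" is a dense per-residue
# layout (at most 14 heavy atoms for any residue); the tensors built above gather
# coordinates between the two layouts according to each residue's type.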
| 134 | 0 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where ``function`` becomes 0 in [a, b] using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
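
    # Added check: the real root of x**3 - 2x - 5 is ~2.0945514815; bisection
    # halves the bracketing interval each step, so it converges in ~log2((b-a)/tol) steps.
    assert abs(bisection(f, 1, 1000) - 2.0945514815423265) < 1e-6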
| 115 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
import ctypes  # cross-platform stdlib; only msvcrt below is Windows-specific

# Windows only
if os.name == "nt":
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 115 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
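
    # Added example: for y' = y with y(0) = 1, 100 Euler steps of size 0.01 give
    # y(1) ~= (1 + 0.01)**100 ~= 2.7048, an underestimate of e ~= 2.7183 that is
    # consistent with the method's O(step_size) global error.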
| 77 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct (Boruvka's algorithm assumes this)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm; returns a minimum spanning tree."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
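
# Illustrative usage (added; names are hypothetical):
#
#   g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 2)])
#   g.distinct_weight()          # Boruvka requires pairwise-distinct weights
#   mst = Graph.boruvka_mst(g)
#   print(mst)                   # edges of the minimum spanning tree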
| 77 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : Tuple = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
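
# --- Added usage sketch (not part of the original file): instantiating the
# config with a couple of overridden hyperparameters; the values are
# illustrative.
if __name__ == "__main__":
    config = XmodConfig(num_hidden_layers=6, languages=("en_XX", "de_DE"))
    print(config.model_type, config.num_hidden_layers, config.languages)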
| 318 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
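
# --- Added usage sketch (not part of the original file): the attribute_map
# lets the generic config names resolve to CTRL's own names; the values are
# illustrative.
if __name__ == "__main__":
    config = CTRLConfig(n_layer=2)
    print(config.num_hidden_layers)  # -> 2, resolved via the attribute_map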
| 318 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias of SegformerImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 329 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    """Deprecated alias of VideoMAEImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 329 | 1 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations from ``array`` that sum to ``target`` (plain recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as above, memoized over the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative dynamic-programming variant of the same count."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__a :int = 3
__a :Any = 5
__a :List[Any] = [1, 2, 5]
print(combination_sum_iv(n, array, target)) | 312 |
import math
def perfect_square(num: int) -> bool:
    """Check for a perfect square via the floating-point square root."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check for a perfect square with integer binary search (no float error)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
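    # Added illustrative checks (not part of the original file):
    print(perfect_square(16), perfect_square_binary_search(16))  # True True
    print(perfect_square(20), perfect_square_binary_search(20))  # False False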
| 328 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
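
# --- Added note (not part of the original file): tests like these are normally
# run with pytest from the transformers repository root, e.g.
#   pytest tests/models/vit_hybrid/test_modeling_vit_hybrid.py
# (the path is assumed from the usual repository layout); the @slow integration
# tests above only run when RUN_SLOW=1 is set in the environment.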
| 357 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
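
# --- Added note (not part of the original file): the @slow integration tests
# above download real checkpoints and are skipped unless RUN_SLOW=1 is set,
# per the usual transformers testing convention.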
| 311 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with additional movement-pruning parameters."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 187 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 134 | 0 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Cast every tensor in a saved state dict to fp16, overwriting src_path by default."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
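
# --- Added usage sketch (not part of the original file): the script is driven
# by python-fire, so a typical invocation (file names illustrative) would be:
#   python this_script.py pytorch_model.bin --save_path pytorch_model.fp16.bin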
| 350 | def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and to store path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
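# Added note (not part of the original file): for this classic CLRS example
# network, the expected maximum flow from vertex 0 to vertex 5 is 23.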
| 197 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
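
# --- Added usage sketch (not part of the original file): a typical captioning
# call through the high-level `pipeline` factory; the checkpoint name and
# image URL are illustrative and require network access.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))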
| 77 | """simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker | 77 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, backed by a simple JSON vocab file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
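
# --- Added usage sketch (not part of the original file): a character-level
# round trip, assuming the pretrained vocab is reachable over the network.
#
#   tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tok("hello")["input_ids"]
#   print(ids, tok.convert_ids_to_tokens(ids))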
| 365 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 335 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Deserialize a saved Flax state dict from ``model_file`` and load it into ``pt_model``."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model, transposing kernels as needed."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
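
# --- Added usage sketch (not part of the original file): loading a Flax
# checkpoint into an already-instantiated PyTorch module; the class and the
# file name are illustrative assumptions.
#
#   pt_model = MyPyTorchModel(config)
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")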
| 329 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 329 | 1 |
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        c = first & second  # carry bits
        first ^= second  # sum without carry
        second = c << 1  # shift carry into position
    return first
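
# --- Added worked example (not part of the original file): add(5, 3).
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   -> carry=0b010, first=0b100, second=0b100
#   -> carry=0b100, first=0b000, second=0b1000
#   -> carry=0b000, first=0b1000, second=0 -> returns 8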
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : List[str] = int(input('''Enter the first number: ''').strip())
__UpperCamelCase : Tuple = int(input('''Enter the second number: ''').strip())
print(F'''{add(first, second) = }''')
| 74 |
"""simple docstring"""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point to its 2D perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")

    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)

    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")

    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }''')
print(F'''{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }''')
| 74 | 1 |
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
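
# Usage sketch (editorial example, not part of the original module):
#
#     # A string containing whitespace is treated as the literal prompt ...
#     prompt = download_prompt("You are an agent that ...", agent_name="MyAgent")
#
#     # ... while None falls back to the default prompts repo for the given mode.
#     run_prompt = download_prompt(None, agent_name="MyAgent", mode="run")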
| 170 |
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    '''Disable gradient updates for all parameters of the given module.'''
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    '''Pick the best available torch device, warning about the flaky MPS backend.'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    '''Display a PIL image without axis ticks.'''
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    '''Return the current time as an HH:MM:SS string.'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
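

if __name__ == "__main__":
    # Editorial smoke test (not part of the original script): freeze a tiny module and
    # confirm its parameters no longer require gradients.
    layer = torch.nn.Linear(2, 2)
    freeze_module(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(get_timestamp(), "device:", get_device())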
| 311 | 0 |
'''simple docstring'''


def different_signs(num1: int, num2: int) -> bool:
    '''
    Return True if the two integers have opposite signs, False otherwise.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    '''
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 174 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
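
# Usage sketch (editorial example, not part of the original module):
#
#     from transformers import XLMRobertaTokenizer
#
#     tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     ids = tokenizer("Hello world!")["input_ids"]
#     # Single sequences are wrapped as `<s> ... </s>` by `build_inputs_with_special_tokens`.
#     assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id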
| 84 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Note: the misspelled kwarg key below is kept for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
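
# Usage sketch (editorial example, not part of the original module):
#
#     from transformers import OpenLlamaConfig
#
#     # A valid `rope_scaling` dict has exactly the two fields checked above.
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#
#     # A factor <= 1.0 raises a ValueError in `_rope_scaling_validation`:
#     # OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})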
| 197 | 0 |
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop('input_ids')
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop('attention_mask')

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
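
# Usage sketch (editorial example, not part of the original module; the checkpoint name
# is illustrative):
#
#     from PIL import Image
#     from transformers import InstructBlipProcessor
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=Image.open("photo.jpg"), text="Describe the image.", return_tensors="pt")
#     # `inputs` now carries pixel_values, input_ids/attention_mask from the main tokenizer,
#     # and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.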
| 371 |
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the hollow-square lamina sizes that can be formed in between 1 and
    `n_limit` distinct ways using at most `t_limit` tiles (Project Euler 174).
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
| 145 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                # use the highway of the given layer
                outputs = (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 158 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare an installed library version (or a `Version` object) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against the given reference with the given operation."""
    return compare_versions(torch_version, operation, version)
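
# Usage sketch (editorial example, not part of the original module):
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # enable a code path that needs a recent torch
#
#     compare_versions("numpy", ">=", "1.20.0")  # works for any installed distribution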
| 335 | 0 |
"""simple docstring"""
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 54 | """simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 54 | 1 |
"""simple docstring"""
from math import pow, sqrt
def _snake_case ( *snake_case__ : float ):
A = len(snake_case__ ) > 0 and all(value > 0.0 for value in values )
return result
def _snake_case ( snake_case__ : float , snake_case__ : float ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(snake_case__ , snake_case__ )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(snake_case__ , snake_case__ , snake_case__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(snake_case__ , snake_case__ , snake_case__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(snake_case__ , snake_case__ , snake_case__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(snake_case__ , snake_case__ , snake_case__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
) | 74 |
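

if __name__ == "__main__":
    # Editorial smoke test (not part of the original file): hydrogen (~2.016 g/mol)
    # effuses faster than oxygen (~31.998 g/mol); the rate ratio is sqrt(M2 / M1) ≈ 3.98.
    print(f"{effusion_ratio(2.016, 31.998) = }")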
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""LayoutLMv3FeatureExtractor"""]
_A = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 166 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 103 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 174 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPT-Neo model."""

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_windows(seq_length, window_size):
    """Custom implementation of the block-length / window-count computation to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
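
# Equivalence sketch for `custom_unfold` (editorial example, not part of the original
# module; shown as a comment because this file only imports inside the package):
#
#     x = torch.arange(12.0).reshape(2, 6)
#     assert torch.equal(custom_unfold(x, 1, 2, 2), x.unfold(1, 2, 2))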
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 371 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    # Test-method and variable names restored to descriptive ones; the obfuscated
    # source gave every method the same name (so later definitions shadowed
    # earlier ones) and referenced names it never bound.
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26,
                 num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None,
                 cross_attention_shape_for_attention="kv", self_attention_widening_factor=1,
                 cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1,
                 initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262,
                 max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16,
                 audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
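# Illustrative usage (added; not in the original file): a smaller-than-default
# config can be built with e.g. PerceiverConfig(num_latents=64, d_latents=256);
# any field left unspecified keeps the defaults listed in the signature above.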
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 18 | '''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex, ordered by its current key for Prim's algorithm."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a plain list; returns (vertex, parent) MST edges."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap; yields (vertex, parent) MST edges."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
            hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    pass
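# Illustrative cross-check (added; the sample graph below is an assumption for
# demonstration, not part of the original module): both implementations should
# produce the same minimum spanning tree edges for the same input graph.
def _demo_prim() -> None:
    g = [Vertex(n) for n in range(5)]
    connect(g, 1, 2, 15)
    connect(g, 1, 3, 12)
    connect(g, 2, 4, 13)
    connect(g, 2, 5, 5)
    connect(g, 3, 2, 6)
    connect(g, 3, 4, 6)
    assert prim(g, g[0]) == list(prim_heap(g, g[0]))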
if __name__ == "__main__":
import doctest
doctest.testmod() | 145 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treats the curve as a sequence of linear segments and sums the trapezium areas."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa2 = (x_end - x_start) / steps + xa
        fxa2 = fnc(xa2)
        area += abs(fxa2 + fxa) * (xa2 - xa) / 2
        # Increment step
        xa = xa2
        fxa = fxa2
    return area
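# For smooth integrands the error of this composite trapezoidal rule shrinks
# roughly as 1/steps**2. Quick sanity check (added, not in the original file):
# trapezoidal_area(lambda x: x * x, 0, 1, 1000) is close to 1/3.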
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 10
| 358 | """simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
| 312 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 54 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a__ : Tuple = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 54 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCAmelCase_ = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
lowerCAmelCase_ = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
lowerCAmelCase_ = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
lowerCAmelCase_ = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
lowerCAmelCase_ = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    """Apply the (tf_pattern, hf_pattern) substitutions in order to a TF variable name."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
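# Illustrative example (added; the key shown is an assumption about the TF
# naming scheme, not taken from a real checkpoint):
#     rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
# should yield "model.decoder.layers.0.self_attn.q_proj.weight".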
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 279 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
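# Quick sanity examples (added, not in the original module): is_prime_big(97)
# should return True and is_prime_big(100) False. Being probabilistic, the test
# can in principle err, but with prec=1000 rounds that chance is negligible.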
if __name__ == "__main__":
lowerCAmelCase_ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 279 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : List[str] , A_ : Dict , A_ : bool = True , A_ : Dict[str, int] = None , A_ : int = 3_2 , A_ : bool = True , A_ : Union[int, float] = 1 / 2_5_5 , A_ : bool = True , A_ : bool = True , A_ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , A_ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , A_ : bool = True , A_ : List[str]=7 , A_ : Any=3_0 , A_ : Dict=4_0_0 , A_ : str=3 , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : List[Any] = do_resize
lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 2_8_8}
lowerCAmelCase_ : int = size_divisor
lowerCAmelCase_ : List[str] = do_rescale
lowerCAmelCase_ : List[Any] = rescale_factor
lowerCAmelCase_ : List[Any] = do_normalize
lowerCAmelCase_ : Optional[Any] = do_center_crop
lowerCAmelCase_ : Optional[Any] = image_mean
lowerCAmelCase_ : Dict = image_std
lowerCAmelCase_ : List[Any] = do_pad
lowerCAmelCase_ : List[Any] = batch_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : Any = min_resolution
lowerCAmelCase_ : Optional[int] = max_resolution
def UpperCAmelCase__ ( self : int):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCAmelCase__ ( self : Dict , A_ : Tuple , A_ : str=False):
if not batched:
lowerCAmelCase_ : Optional[int] = self.size['''shortest_edge''']
lowerCAmelCase_ : List[str] = image_inputs[0]
if isinstance(A_ , Image.Image):
lowerCAmelCase_ , lowerCAmelCase_ : str = image.size
else:
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = image.shape[1], image.shape[2]
lowerCAmelCase_ : str = size / min(A_ , A_)
if h < w:
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = size, scale * w
else:
lowerCAmelCase_ , lowerCAmelCase_ : Dict = scale * h, size
lowerCAmelCase_ : Union[str, Any] = int((1_3_3_3 / 8_0_0) * size)
if max(A_ , A_) > max_size:
lowerCAmelCase_ : Tuple = max_size / max(A_ , A_)
lowerCAmelCase_ : int = newh * scale
lowerCAmelCase_ : int = neww * scale
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = int(newh + 0.5), int(neww + 0.5)
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
lowerCAmelCase_ : Any = []
for image in image_inputs:
lowerCAmelCase_ , lowerCAmelCase_ : int = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
lowerCAmelCase_ : List[str] = max(A_ , key=lambda A_: item[0])[0]
lowerCAmelCase_ : List[str] = max(A_ , key=lambda A_: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
| 103 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class MgpstrConfig(PretrainedConfig):
    # Parameter names restored from the upstream MGP-STR configuration; the
    # obfuscated signature reused one name for every argument.
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27,
                 num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False,
                 layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0,
                 output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 166 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def _lowerCAmelCase (self :Optional[int] )-> int:
return 32
@property
def _lowerCAmelCase (self :Optional[Any] )-> List[Any]:
return 32
@property
def _lowerCAmelCase (self :Tuple )-> Dict:
return self.time_input_dim
@property
def _lowerCAmelCase (self :int )-> str:
return self.time_input_dim * 4
@property
def _lowerCAmelCase (self :Optional[Any] )-> Any:
return 100
@property
def _lowerCAmelCase (self :Union[str, Any] )-> List[str]:
__A = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def _lowerCAmelCase (self :Union[str, Any] )-> str:
torch.manual_seed(0 )
__A = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__A = MultilingualCLIP(_UpperCamelCase )
__A = text_encoder.eval()
return text_encoder
@property
def _lowerCAmelCase (self :Union[str, Any] )-> Tuple:
torch.manual_seed(0 )
__A = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        __A = UNet2DConditionModel(**_UpperCamelCase )
return model
@property
def _lowerCAmelCase (self :List[Any] )-> List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase (self :Optional[int] )-> Optional[Any]:
torch.manual_seed(0 )
__A = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase (self :List[str] )-> Any:
__A = self.dummy_text_encoder
__A = self.dummy_tokenizer
__A = self.dummy_unet
__A = self.dummy_movq
__A = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__A = DDIMScheduler(**_UpperCamelCase )
__A = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :int , _UpperCamelCase :str=0 )-> Optional[int]:
__A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
__A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_UpperCamelCase )
# create init_image
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
__A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __A = Image.fromarray(np.uint8(_UpperCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_UpperCamelCase ).startswith('''mps''' ):
__A = torch.manual_seed(_UpperCamelCase )
else:
__A = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
__A = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _lowerCAmelCase (self :Optional[Any] )-> Optional[int]:
__A = '''cpu'''
__A = self.get_dummy_components()
__A = self.pipeline_class(**_UpperCamelCase )
__A = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
__A = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
__A = output.images
__A = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) , return_dict=_UpperCamelCase , )[0]
__A = image[0, -3:, -3:, -1]
__A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 250 |
import sys
def matrix_chain_order(array):
    """Dynamic-programming solution for the matrix chain multiplication problem."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 250 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_a : Optional[Any] = None
_a : Tuple = logging.get_logger(__name__)
_a : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
_a : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
_a : List[str] = {
'google/rembert': 256,
}
_a : Dict = '▁'
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
                 keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
                         eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
                         cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 44 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url, params):
    '''Scrape the citation count from a Google Scholar lookup page.'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()
if __name__ == "__main__":
lowercase : str = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 20_18,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 3 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_bigbird_pegasus""": [
        """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BigBirdPegasusConfig""",
        """BigBirdPegasusOnnxConfig""",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bigbird_pegasus"""] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
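# Illustrative note (not part of the original module): swapping the module in
# sys.modules for a _LazyModule defers the heavy torch import, so
#   from transformers import BigBirdPegasusConfig
# resolves immediately while the model classes are only imported on first access.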
| 181 |
"""simple docstring"""
def triangle_number_generator():
    for n in range(1 , 1000000 ):
        yield n * (n + 1) // 2
def count_divisors( n ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
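# Worked example: 28 = 2**2 * 7, so count_divisors(28) returns
# (2 + 1) * (1 + 1) = 6, matching its divisors {1, 2, 4, 7, 14, 28}.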
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 181 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer
MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast
MT5TokenizerFast = T5TokenizerFast
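# Context note (not in the original file): mT5 reuses the T5 tokenizer classes,
# so MT5Tokenizer is a plain alias, e.g. MT5Tokenizer.from_pretrained("google/mt5-small").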
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels( objs=OBJECTS , attrs=ATTRIBUTES ):
    """Read the Visual Genome class and attribute label files."""
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
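# Illustrative example (assumed label-file format): a line such as
# "traffic light,traffic lights" contributes the single class name "traffic light".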
def load_checkpoint( ckp_path ):
    """Load a pickled checkpoint and convert its numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path , "rb" ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class Config:
    """simple docstring"""
    _pointer = {}
    def __init__( self , dictionary: dict , name: str = "root" , level=0 ):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v , dict ):
                v = Config(v , name=k , level=level + 1 )
            d[k] = v
            setattr(self , k , v )
        self._pointer = d
def __repr__( self : Optional[Any] ):
return str(list((self._pointer.keys()) ) )
    def __setattr__( self , key , val ):
        self.__dict__[key] = val
        self.__dict__[key.split("." )[-1]] = val
        levels = key.split("." )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , ".".join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict( self ):
        return self._pointer
    def dump_yaml( self , data , file_name ):
        with open(f'''{file_name}''' , "w" ) as stream:
            dump(data , stream )
    def dump_json( self , data , file_name ):
        with open(f'''{file_name}''' , "w" ) as stream:
            json.dump(data , stream )
    @staticmethod
    def load_yaml( config ):
        with open(config ) as stream:
            data = load(stream , Loader=Loader )
        return data
    def __str__( self ):
        t = "    "
        if self._name != "root":
            r = f'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += f'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += f'''{t * (self._level)}{k}: {v} ({type(v ).__name__})\n'''
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )
    @classmethod
    def get_config_dict( cls , pretrained_model_name_or_path , **kwargs ):
        cache_dir = kwargs.pop("cache_dir" , None )
        force_download = kwargs.pop("force_download" , False )
        resume_download = kwargs.pop("resume_download" , False )
        proxies = kwargs.pop("proxies" , None )
        local_files_only = kwargs.pop("local_files_only" , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print("loading configuration file from path" )
        else:
            print("loading configuration file cache" )
        return Config.load_yaml(resolved_config_file ), kwargs
def compare( in_tensor ):
    """Compare an in-memory tensor against the reference tensor saved in dump.pt."""
    out_tensor = torch.load("dump.pt" , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.01 , atol=0.1 ), (
        f'''{sum([1 for x in np.isclose(na , nb , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    """simple docstring"""
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id , filename , use_cdn=True ):
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f'''{endpoint}/{model_id}-{filename}'''
    else:
        return f'''{endpoint}/{model_id}/{filename}'''
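# Example outputs (these follow directly from the formatting above):
#   hf_bucket_url("bert-base-uncased", "config.yaml")
#       -> "https://cdn.huggingface.co/bert-base-uncased-config.yaml"  (legacy id, no "/")
#   hf_bucket_url("uclanlp/visualbert-vqa", "config.yaml", use_cdn=False)
#       -> "https://s3.amazonaws.com/models.huggingface.co/bert/uclanlp/visualbert-vqa/config.yaml"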
def http_get( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join("{}/{}".format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416: # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="B" , unit_scale=True , total=total , initial=resume_size , desc="Downloading" , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
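# Note on the resume logic above: when resume_size > 0 a "Range: bytes=N-" header
# asks the server for the remainder of the file; an HTTP 416 response means the
# requested range is empty, i.e. the download was already complete.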
def get_from_cache( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get("ETag" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + ".*" )
                if not file.endswith(".json" ) and not file.endswith(".lock" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False." )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , "a+b" ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s" % (url , temp_file.name ) )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path , "w" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def url_to_filename( url , etag=None ):
    """simple docstring"""
    url_bytes = url.encode("utf-8" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5" ):
        filename += ".h5"
    return filename
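# Cache-file naming scheme implied above: sha256(url), optionally followed by
# "." + sha256(etag), so a changed ETag yields a fresh cache entry for the same URL.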
def cached_path( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir , output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace("." , "-" ) + "-extracted"
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , "r" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path ) )
        return output_path_extracted
    return output_path
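# Usage sketch (illustrative; the URL is an example and may no longer resolve):
#   local_path = cached_path("/tmp/config.yaml")   # existing local files are returned as-is
#   remote_path = cached_path("https://cdn.huggingface.co/bert-base-uncased-config.yaml")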
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any="," ):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
if os.path.isfile(__UpperCamelCase ):
with open(__UpperCamelCase ) as f:
A_ = eval(f.read() )
else:
A_ = requests.get(__UpperCamelCase )
try:
A_ = requests.json()
except Exception:
A_ = req.content.decode()
assert data is not None, "could not connect"
try:
A_ = eval(__UpperCamelCase )
except Exception:
A_ = data.split("\n" )
req.close()
return data
def get_image_from_url( url ):
    """simple docstring"""
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl_from_url( url ):
    """simple docstring"""
    fn = url.split("/" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , "rb" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("model" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace("running_var" , "num_batches_tracked" )
            new[k2] = zero
    return new
def get_demo_path():
    """simple docstring"""
    print(f'''{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb''' )
def img_tensorize( im , input_format="RGB" ):
    """simple docstring"""
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, f'''could not connect to: {im}'''
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk( images , batch=1 ):
    """simple docstring"""
    # e.g. list(chunk([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]
    return (images[i : i + batch] for i in range(0 , len(images ) , batch )) | 312 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    '''simple docstring'''
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ):
        """simple docstring"""
        return TaConfig.from_pretrained("""google/umt5-base""" )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        """simple docstring"""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ):
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        """simple docstring"""
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        """simple docstring"""
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )["""last_hidden_state"""]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["""last_hidden_state"""]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_model_fp16_forward( self , config , input_dict , ):
        """simple docstring"""
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )["""last_hidden_state"""]
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': UMTaForConditionalGeneration,
            '''feature-extraction''': UMTaModel,
            '''summarization''': UMTaForConditionalGeneration,
            '''text2text-generation''': UMTaForConditionalGeneration,
            '''translation''': UMTaForConditionalGeneration,
            '''question-answering''': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ):
        """simple docstring"""
        self.model_tester = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
    def test_export_to_onnx( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=True , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def test_model_fp16_forward( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs )
    def test_generate_with_head_masking( self ):
        """simple docstring"""
        attention_names = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            """decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["""decoder_head_mask"""] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
    def test_disk_offload( self ):
        """simple docstring"""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase ):
    '''simple docstring'''
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
    def test_small_integration_test( self ):
        """simple docstring"""
        model = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=False , legacy=False )
        input_text = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
        input_ids = tokenizer(input_text , return_tensors="""pt""" , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING ) | 306 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 306 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase , ToolTesterMixin ):
    def setUp(self ):
        '''simple docstring'''
        self.tool = load_tool('''text-to-speech''' )
        self.tool.setup()
    def test_exact_match_arg(self ):
        '''simple docstring'''
        # SpeechT5 isn't deterministic, so fix the seed before generating.
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
    def test_exact_match_kwarg(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        result = self.tool(text='''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 279 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size_divisor=32 , do_rescale=True , ) -> Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self ):
        '''simple docstring'''
        self.image_processor_tester = GLPNImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size_divisor''' ) )
        self.assertTrue(hasattr(image_processing , '''resample''' ) )
        self.assertTrue(hasattr(image_processing , '''do_rescale''' ) )
    def test_batch_feature(self ):
        '''simple docstring'''
        pass
    def test_call_pil(self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_numpy(self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch(self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 279 | 1 |
"""simple docstring"""
def mean_absolute_deviation(nums ) -> float:
    if not nums: # Makes sure that the list is not empty
        raise ValueError("List is empty" )
    average = sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
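# Worked example: for nums = [1, 2, 3, 4] the average is 2.5 and the absolute
# deviations are [1.5, 0.5, 0.5, 1.5], so the function returns 4.0 / 4 = 1.0.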
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
        '''simple docstring'''
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
        '''simple docstring'''
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8))
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2)
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True)
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , "do_resize"))
        self.assertTrue(hasattr(image_processing , "size"))
        self.assertTrue(hasattr(image_processing , "do_center_crop"))
        self.assertTrue(hasattr(image_processing , "center_crop"))
        self.assertTrue(hasattr(image_processing , "do_normalize"))
        self.assertTrue(hasattr(image_processing , "image_mean"))
        self.assertTrue(hasattr(image_processing , "image_std"))
        self.assertTrue(hasattr(image_processing , "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , "do_resize"))
        self.assertTrue(hasattr(image_processing , "size"))
        self.assertTrue(hasattr(image_processing , "do_center_crop"))
        self.assertTrue(hasattr(image_processing , "center_crop"))
        self.assertTrue(hasattr(image_processing , "do_normalize"))
        self.assertTrue(hasattr(image_processing , "image_mean"))
        self.assertTrue(hasattr(image_processing , "image_std"))
        self.assertTrue(hasattr(image_processing , "do_convert_rgb"))
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCAmelCase: int = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image)
# Test not batched input
__lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 108 | 0 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
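

# Usage sketch (added for illustration; the nested dict is hypothetical):
# Config exposes nested dictionaries through dotted attribute access.
if __name__ == "__main__":
    cfg = Config({"model": {"hidden_size": 768}})
    print(cfg.model.hidden_size)  # -> 768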
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
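

# Offline usage sketch (added; the model id and URL below are illustrative
# only): both helpers are pure string functions and need no network access.
if __name__ == "__main__":
    print(hf_bucket_url("uclanlp/visualbert-vqa", filename=WEIGHTS_NAME, use_cdn=False))
    print(url_to_filename("https://example.com/model.bin", etag='"abc123"'))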
| 250 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class TestTPU(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 250 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case : List[str] = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
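

# Self-contained sketch (added) of the lazy-import idea the _LazyModule above
# implements: defer a submodule import until an attribute is first accessed.
# `json` stands in for a heavy modeling submodule; names here are hypothetical.
class _LazyDemo:
    def __getattr__(self, name):
        import importlib

        heavy = importlib.import_module("json")  # imported only on first access
        return getattr(heavy, name)


if __name__ == "__main__":
    print(_LazyDemo().dumps({"lazy": True}))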
| 281 |
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate the square root of `a` with Newton's method."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
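

# Quick numeric check (added for illustration; the tolerance is an assumption):
# Newton's iteration should agree with math.sqrt to high precision.
if __name__ == "__main__":
    for value in (2.0, 9.0, 12.5):
        assert abs(square_root_iterative(value) - math.sqrt(value)) < 1e-9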
| 281 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 181 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
UpperCamelCase__ = True
except (ImportError, ModuleNotFoundError):
UpperCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
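

# Hypothetical usage sketch (added; the sample text is illustrative, and nltk
# must be installed per the guard above):
if __name__ == "__main__":
    sample = "Pegasus marks newlines with <n>. ROUGE-Lsum wants real newlines."
    print(add_newline_to_end_of_each_sentence(sample))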
| 181 | 1 |
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator up to `digit` whose unit fraction 1/d has the
    longest recurring cycle in its decimal expansion (Project Euler style)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)

            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
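
    # Worked check (added): among denominators up to 10, 1/7 has the longest
    # recurring cycle (142857, six digits), so solution(1, 10) == 7.
    assert solution(1, 10) == 7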
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''LayoutLMv3FeatureExtractor''']
UpperCamelCase = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 125 | 0 |
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
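

def _skiplist_demo() -> None:
    # Minimal usage sketch (added for illustration; not part of the original
    # module): keys only need to be mutually comparable, and inserting an
    # existing key overwrites its value.
    lst = SkipList[int, str]()
    for key in (3, 1, 2):
        lst.insert(key, str(key))
    assert list(lst) == [1, 2, 3]  # iteration yields keys in sorted order
    lst.delete(2)
    assert lst.find(2) is None
    assert lst.find(3) == "3"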
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 327 |
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
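
    # Cross-check (added for illustration): Strassen's result should match a
    # plain triple-loop product. `expected` is computed first because strassen
    # pads its inputs in place.
    lhs = [[1, 2, 3], [4, 5, 6]]
    rhs = [[7, 8], [9, 10], [11, 12]]
    expected = [
        [sum(lhs[i][k] * rhs[k][j] for k in range(3)) for j in range(2)]
        for i in range(2)
    ]
    assert strassen(lhs, rhs) == expected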
| 305 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
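

# Denoising-loop sketch (added for illustration; the zero "model output" is a
# stand-in for a real UnCLIP epsilon-prediction network, and the tensor shape
# is arbitrary):
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)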
| 357 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be summed before a
    single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
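
    # Worked examples (added): 39 -> 27 -> 14 -> 4 is three multiplicative
    # steps; 199 -> 19 -> 10 -> 1 is three additive steps.
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(199) == 3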
| 279 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
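

# Instantiation sketch (added): projection_dim=0 (the default) means the
# encoder's pooled output is used as-is, with no extra projection layer.
if __name__ == "__main__":
    config = DPRConfig(projection_dim=128)
    print(config.hidden_size, config.projection_dim)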
| 108 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__=0.9_9_9 , UpperCamelCase__="cosine" , ) -> Tuple:
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase__ ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase__ ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
__lowerCamelCase = []
for i in range(UpperCamelCase__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) )
return torch.tensor(UpperCamelCase__ , dtype=torch.floataa )
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun's second-order scheduler (Algorithm 1 of Karras et al., 2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        """Scale the denoising model input by `1 / sqrt(sigma**2 + 1)`."""
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        """Set the discrete timesteps (and sigmas) used for the diffusion chain."""
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Construct the noise schedule of Karras et al. (2022)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        """Predict the sample at the previous timestep from the model output."""
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Add noise to `original_samples` at the noise level of the given timesteps."""
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
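
# Minimal end-to-end usage sketch for the scheduler above (an illustrative
# addition; the "denoiser" is a stand-in returning zeros, and the relative
# imports mean this only runs from inside the package). Heun is a two-stage
# method, so the timestep list interleaves duplicates and `step` is effectively
# called twice per inference step.
if __name__ == "__main__":
    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 4, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a real denoising model
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)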
| 237 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
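
# Illustrative sketch of what this tool does under the hood, using the NLLB
# checkpoint directly (the prompt text and generation settings below are
# examples added for illustration, not part of this file):
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
#     inputs = tokenizer("The weather is nice today.", return_tensors="pt")
#     output_ids = model.generate(
#         **inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn")
#     )
#     print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))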
| 237 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 281 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Check whether `input_string` fully matches `pattern`, where the pattern may
    contain '.' (any single character) and '*' (zero or more of the preceding
    element), using bottom-up dynamic programming.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
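
# Worked example (added for illustration): for input_string = "aab" and
# pattern = "c*a*b", dp[i][j] below indexes prefixes of "aab" (rows) and of
# "c*a*b" (columns). "c*" matches zero characters, "a*" absorbs both a's, and
# the trailing "b" matches, so dp[-1][-1] == 1 and the strings match.
#
#          ""  c   c*  a   a*  b
#     ""    1  0   1   0   1   0
#     a     0  0   0   1   1   0
#     aa    0  0   0   0   1   0
#     aab   0  0   0   0   0   1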
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"{input_string} matches the given pattern {pattern}")
else:
print(F"{input_string} does not match with the given pattern {pattern}")
| 281 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase_ = getattr(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = pt_inputs["input_ids"].shape
UpperCAmelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCAmelCase ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = pt_model_class(_UpperCAmelCase ).eval()
UpperCAmelCase_ = model_class(_UpperCAmelCase , dtype=jnp.float32 )
UpperCAmelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _UpperCAmelCase )
UpperCAmelCase_ = fx_state
with torch.no_grad():
UpperCAmelCase_ = pt_model(**_UpperCAmelCase ).to_tuple()
UpperCAmelCase_ = fx_model(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = model_class.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
UpperCAmelCase_ = fx_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(
len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase_ = getattr(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = pt_model_class(_UpperCAmelCase ).eval()
UpperCAmelCase_ = model_class(_UpperCAmelCase , dtype=jnp.float32 )
UpperCAmelCase_ = load_flax_weights_in_pytorch_model(_UpperCAmelCase , fx_model.params )
UpperCAmelCase_ , UpperCAmelCase_ = pt_inputs["input_ids"].shape
UpperCAmelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCAmelCase ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCAmelCase_ = pt_model(**_UpperCAmelCase ).to_tuple()
UpperCAmelCase_ = fx_model(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = pt_model_class.from_pretrained(_UpperCAmelCase , from_flax=_UpperCAmelCase )
with torch.no_grad():
UpperCAmelCase_ = pt_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(
len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 241 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 241 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
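
# Standalone sketch of the slicing performed above (an illustrative addition,
# not part of the original script): timm stores attention as one fused
# (3*hidden, hidden) qkv matrix, cut into equal query/key/value blocks along
# the first axis.
if __name__ == "__main__":
    hidden = 8
    fused = torch.randn(3 * hidden, hidden)
    q = fused[:hidden, :]
    k = fused[hidden : 2 * hidden, :]
    v = fused[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)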
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 92 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path: str):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the target checkpoint is a VQA model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
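
# Quick illustration (an addition, not part of the original script) of the
# `layers_N` -> `layer.N` regex rewrite used in rename_and_convert_flax_params:
#
#     >>> re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_3.attention.query.kernel")
#     'encoder.layer.3.attention.query.kernel'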
| 125 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
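
# Example usage (added for illustration): modules that need a dependency
# checked at import time can call the helper above.
#
#     dep_version_check("tqdm")  # raises if the installed tqdm violates the pin
#     dep_version_check("numpy", "pip install numpy --upgrade")  # with a custom hint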
| 353 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
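
# Illustrative usage sketch (the checkpoint name and image paths below are
# assumptions for the example, not part of this file):
#
#     from PIL import Image
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     image = Image.open("scene.jpg")
#     # text prompts and images batched together:
#     enc = processor(text=["a cat", "a dog"], images=[image, image], return_tensors="pt")
#     # or condition on a visual prompt instead of text:
#     enc = processor(visual_prompt=Image.open("prompt.jpg"), images=image, return_tensors="pt")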
| 22 | 0 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    """
    Compute the Schur complement of the block matrix [[A, B], [B^T, C]] with
    respect to A, i.e. C - B^T A^{-1} B. A precomputed (pseudo-)inverse of A
    can be supplied via `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
class lowercase__( unittest.TestCase ):
"""simple docstring"""
    def test_schur_complement(self) -> None:
lowercase_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ = np.array([[2, 1], [6, 3]] )
lowercase_ = schur_complement(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = np.block([[a, b], [b.T, c]] )
lowercase_ = np.linalg.det(SCREAMING_SNAKE_CASE_ )
lowercase_ = np.linalg.det(SCREAMING_SNAKE_CASE_ )
lowercase_ = np.linalg.det(SCREAMING_SNAKE_CASE_ )
self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , det_a * det_s )
    def test_improper_a_b_dimensions(self) -> None:
lowercase_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ = np.array([[2, 1], [6, 3]] )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
schur_complement(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    def test_improper_b_c_dimensions(self) -> None:
lowercase_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
schur_complement(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
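
# Minimal round-trip sketch (added for illustration): feed a random tensor in
# [-1, 1] shaped like model output through the helpers above. torch is imported
# here only for the demo.
if __name__ == "__main__":
    import torch

    images = torch.rand(2, 3, 16, 16) * 2 - 1
    pil_images = pt_to_pil(images)
    assert len(pil_images) == 2 and pil_images[0].size == (16, 16)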
| 279 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
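
# Quick numerical check (added for illustration): the standard normal density
# peaks at 1 / sqrt(2*pi) ~= 0.3989 at the mean and is symmetric around mu.
if __name__ == "__main__":
    assert abs(gaussian(0) - 1 / sqrt(2 * pi)) < 1e-12
    assert abs(gaussian(1) - gaussian(-1)) < 1e-12
    print(gaussian(0), gaussian(2, mu=2, sigma=3))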
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None,
        stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_length=False, verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 142 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase ( PretrainedConfig ):
    model_type = """unispeech-sat"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , num_clusters=504 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
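# A small standalone check, not in the original file, of what the property above
# computes: the product of the feature-extractor conv strides is the waveform-to-frame
# downsampling factor, 320 samples per frame for the default strides.
if __name__ == "__main__":
    import functools
    import operator

    default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
    print(functools.reduce(operator.mul, default_conv_stride, 1))  # -> 320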
| 237 |
'''simple docstring'''
def decimal_to_fraction ( decimal : int | float | str ) -> tuple[int, int]:
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError("Please enter a valid number" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split("." )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        # reduce the fraction with Euclid's algorithm for the GCD
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction("67") = }""")
print(f"""{decimal_to_fraction("45.0") = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction("6.25") = }""")
print(f"""{decimal_to_fraction("78td") = }""")
| 237 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float''' ),
                    '''references''': datasets.Value('''float''' ),
                } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 363 |
"""simple docstring"""
def is_automorphic_number ( number ):
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
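# A short demo, not in the original file. Automorphic numbers are those whose
# square ends in the number itself, e.g. 5*5 = 25 and 76*76 = 5776.
if __name__ == "__main__":
    print([n for n in range(1000) if is_automorphic_number(n)])
    # -> [0, 1, 5, 6, 25, 76, 376, 625]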
| 74 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""add_prefix_space""": True}
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPT2Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def lowerCamelCase ( self : str ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Optional[int] = self.get_tokenizer()
lowerCAmelCase_ : List[str] = self.get_rust_tokenizer(add_prefix_space=a_ )
lowerCAmelCase_ : List[str] = "lower newer"
# Testing tokenization
lowerCAmelCase_ : List[str] = tokenizer.tokenize(a_ , add_prefix_space=a_ )
lowerCAmelCase_ : Union[str, Any] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
lowerCAmelCase_ : str = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=a_ )
lowerCAmelCase_ : Optional[int] = tokenizer.encode(a_ , add_prefix_space=a_ )
lowerCAmelCase_ : int = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
# Testing the unknown token
lowerCAmelCase_ : List[str] = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ) , a_ )
    def test_pretokenized_inputs( self , *args , **kwargs ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
def lowerCamelCase ( self : List[Any] , a_ : Tuple=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
# Simple input
lowerCAmelCase_ : List[Any] = "This is a simple input"
lowerCAmelCase_ : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : Optional[Any] = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="max_length" )
# Simple input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="max_length" )
# Simple input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="max_length" , )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="max_length" )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="max_length" )
# Pair input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="max_length" , )
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : str = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Optional[int] = "This is a simple input"
lowerCAmelCase_ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[int] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : int = tokenizer.pad_token_id
lowerCAmelCase_ : int = tokenizer(a_ , padding="max_length" , max_length=30 , return_tensors="np" )
lowerCAmelCase_ : Any = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors="np" )
lowerCAmelCase_ : int = tokenizer(*a_ , padding="max_length" , max_length=60 , return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = "$$$"
lowerCAmelCase_ : str = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a_ , add_bos_token=a_ )
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : Any = tokenizer.bos_token_id
lowerCAmelCase_ : int = tokenizer(a_ )
lowerCAmelCase_ : Dict = tokenizer(a_ )
self.assertEqual(out_s.input_ids[0] , a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCamelCase ( self : Union[str, Any] ):
pass
def lowerCamelCase ( self : Optional[int] ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowerCAmelCase_ : Optional[int] = [self.get_tokenizer(do_lower_case=a_ , add_bos_token=a_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ : Optional[Any] = "Encode this."
lowerCAmelCase_ : Optional[Any] = "This one too please."
lowerCAmelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
encoded_sequence += tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCAmelCase_ : List[str] = tokenizer.encode_plus(
a_ , a_ , add_special_tokens=a_ , return_special_tokens_mask=a_ , )
lowerCAmelCase_ : Optional[Any] = encoded_sequence_dict["input_ids"]
lowerCAmelCase_ : List[Any] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(a_ ) , len(a_ ) )
lowerCAmelCase_ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a_ )
]
lowerCAmelCase_ : Optional[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(a_ , a_ )
@require_tokenizers
class OPTTokenizationTest ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=a_ )
lowerCAmelCase_ : Any = "A photo of a cat"
lowerCAmelCase_ : List[Any] = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("test_opt" )
lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("./test_opt" )
lowerCAmelCase_ : Any = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=a_ )
lowerCAmelCase_ : Any = "A photo of a cat"
lowerCAmelCase_ : Dict = tokenizer.encode(
a_ , )
# Same as above
self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=a_ )
lowerCAmelCase_ : Tuple = "bos"
lowerCAmelCase_ : List[Any] = tokenizer.get_vocab()["bos"]
lowerCAmelCase_ : Optional[Any] = "A photo of a cat"
lowerCAmelCase_ : Optional[int] = tokenizer.encode(
a_ , )
# We changed the bos token
self.assertEqual(a_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("./tok" )
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowerCAmelCase_ : Dict = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
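# A hypothetical stand-alone sketch, not part of the original test file, of the
# behaviour these tests exercise; assumes network access and the public "gpt2"
# checkpoint.
if __name__ == "__main__":
    from transformers import GPT2Tokenizer

    tok = GPT2Tokenizer.from_pretrained("gpt2")
    # Byte-level BPE is whitespace sensitive: a leading space changes the token ids.
    print(tok.encode("lower newer"))                         # no prefix space
    print(tok.encode("lower newer", add_prefix_space=True))  # treated as mid-sentence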
| 241 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """esm"""
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    '''simple docstring'''
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    '''simple docstring'''
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def to_dict( self ):
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    '''simple docstring'''
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ):
        return asdict(self )
def get_default_vocab_list ( ) -> Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
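# A small standalone check, not in the original file, of the head-width bookkeeping
# validated in TrunkConfig above: the state dim must split evenly into heads of the
# configured width.
if __name__ == "__main__":
    sequence_state_dim, sequence_head_width = 1024, 32
    assert sequence_state_dim % sequence_head_width == 0
    print(sequence_state_dim // sequence_head_width)  # -> 32 attention heads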
| 241 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 367 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_00_00
SMALL_TEST = 50_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def read( dataset , length ):
    """simple docstring"""
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch( dataset , length , batch_size ):
    """simple docstring"""
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted( dataset , length , type ):
    """simple docstring"""
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch( dataset , length , batch_size , type ):
    """simple docstring"""
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating( ):
    """simple docstring"""
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("""generating dataset""" )
        features = datasets.Features(
            {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={"""list""": (1_00,)} , )
        print("""first set of iterations""" )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + " " + " ".join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
        print("""shuffling dataset""" )
        dataset = dataset.shuffle()
        print("""Second set of iterations (after shuffling)""" )
        for func, kwargs in functions_shuffled:
            print("""shuffled """ , func.__name__ , str(kwargs ) )
            times["shuffled " + func.__name__ + " " + " ".join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
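# `get_duration` comes from a local `utils` module that is not shown here. A
# hypothetical stand-in with the same contract (call the function, return elapsed
# seconds) could look like this sketch; it is an assumption, not the real utility.
def _get_duration_sketch(func):
    import functools
    import time

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper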
| 220 | 0 |
"""simple docstring"""
def decimal_to_binary ( num ) -> str:
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 269 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main ( ) -> None:
    '''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/accelerate" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 22 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 |
'''simple docstring'''
from __future__ import annotations
def find_max ( nums : list[int | float] , left : int , right : int ) -> int | float:
    """simple docstring"""
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
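# A small demo, not in the original file, of the divide-and-conquer recursion above;
# the left and right indices are inclusive on both ends.
if __name__ == "__main__":
    print(find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7))  # -> 9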
| 345 | 0 |
"""simple docstring"""
def base16_encode ( data ):
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode ( data ):
    """simple docstring"""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
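# A round-trip demo, not in the original file.
if __name__ == "__main__":
    encoded = base16_encode(b"Hello")
    print(encoded)                 # -> 48656C6C6F
    print(base16_decode(encoded))  # -> b'Hello'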
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 81 |
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 142 | 0 |
"""simple docstring"""
def counting_sort ( collection )-> list:
    """simple docstring"""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string ( string )-> str:
    """simple docstring"""
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
UpperCAmelCase_ : List[Any] = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase_ : str = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
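# A non-interactive demo, not in the original file; counting sort also handles
# negative keys because indices are offset by the collection minimum.
if __name__ == "__main__":
    print(counting_sort([-5, 3, 0, -1, 3]))  # -> [-5, -1, 0, 3, 3]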
| 363 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_( state_dict )-> None:
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb )-> nn.Linear:
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys( state_dict , expert_idx=None )-> dict:
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
            else:
                key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm" , "ff_layer_norm" )
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name = WEIGHTS_NAME )-> tuple:
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["model"]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
UpperCAmelCase_ : Any = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
UpperCAmelCase_ : Tuple = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 198 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=UpperCamelCase_ , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=UpperCamelCase_ , default=UpperCamelCase_ , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=UpperCamelCase_ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCamelCase_ , )
parser.add_argument(
"""--config_name""" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=UpperCamelCase_ , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="""Where to store the final ONNX file.""" )
    args = parser.parse_args()
    return args
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_="cpu" ):
__SCREAMING_SNAKE_CASE = model_dict[model_name].from_pretrained(UpperCamelCase_ ).to(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = tokenizer_dict[model_name].from_pretrained(UpperCamelCase_ )
if model_name in ["facebook/bart-base"]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0
return huggingface_model, tokenizer
def export_and_validate_model( model , tokenizer , onnx_file_path , num_beams , max_length ):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = """My friends are cool but they eat too many carbs."""
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
        summary_ids = model.generate(
            inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs["""input_ids"""],
                inputs["""attention_mask"""],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
                """input_ids""": {0: """batch""", 1: """seq"""},
                """output_ids""": {0: """batch""", 1: """seq_out"""},
            } , example_outputs=summary_ids , )
        logger.info("""Model exported to {}""".format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info("""Deduplicated and optimized model written to {}""".format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                """input_ids""": inputs["""input_ids"""].cpu().numpy(),
                """attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
                """num_beams""": np.array(num_beams ),
                """max_length""": np.array(max_length ),
                """decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
        logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
        logger.info("""Success.""" )
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = """BART.onnx"""
    logger.info("""Exporting model to ONNX""" )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
| 100 |
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''
    def __init__( self , array : list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start : int , end : int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum : int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
return False
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 74 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MaskFormerSwinModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with"
" `nn.DataParallel`"
) )
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
pass
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
return
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowercase )
@unittest.skip("Swin does not use inputs_embeds" )
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn\'t support output_attentions" )
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
# Swin has a different seq_length
UpperCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn\'t have pretrained checkpoints" )
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_UpperCAmelCase : List[str] ):
UpperCAmelCase_ = 0
return t
def check_equivalence(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : List[str]={} ):
with torch.no_grad():
UpperCAmelCase_ = model(**_lowercase , return_dict=_lowercase , **_lowercase )
UpperCAmelCase_ = model(**_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple()
def recursive_check(_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] ):
if isinstance(_lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ):
recursive_check(_lowercase , _lowercase )
elif isinstance(_lowercase , _lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowercase , _lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowercase ) , set_nan_tensor_to_zero(_lowercase ) , atol=1e-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}. Dict has"""
F""" `nan`: {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}."""
) , )
recursive_check(_lowercase , _lowercase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase )
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase )
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {"output_hidden_states": True} )
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {"output_hidden_states": True} )
@require_torch
class lowercase__ ( unittest.TestCase , _lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase = MaskFormerSwinConfig
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = MaskFormerSwinModelTester(self )
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
UpperCAmelCase_ = backbone_class(_lowercase )
backbone.to(_lowercase )
backbone.eval()
UpperCAmelCase_ = backbone(**_lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCAmelCase_ = backbone(**_lowercase , output_hidden_states=_lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCAmelCase_ = backbone(**_lowercase , output_attentions=_lowercase )
                self.assertIsNotNone(outputs.attentions )
| 370 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''rwkv'''
UpperCamelCase = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , _UpperCAmelCase : int=50277 , _UpperCAmelCase : Optional[Any]=1024 , _UpperCAmelCase : str=4096 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Tuple=1e-5 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=6 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Optional[Any] , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = context_length
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = rescale_every
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
| 241 | 0 |
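A hedged usage sketch for the RWKV configuration above; the class name RwkvConfig and the keyword names are assumptions taken from its upstream counterpart and from the assignments in `__init__`, since the obfuscation collapsed the parameter names:

# Hypothetical usage, assuming the deobfuscated signature:
config = RwkvConfig(vocab_size=50277, context_length=1024, hidden_size=768, num_hidden_layers=12)
# Unset sizes fall back to hidden_size and 4 * hidden_size, per __init__ above:
assert config.attention_hidden_size == 768
assert config.intermediate_size == 4 * 768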
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """simple docstring"""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        # Test the three new diagonal corners of the next spiral ring
        # (the fourth corner is the square (j + 2) ** 2, never prime).
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)  # bool adds as 0 or 1
        j += 2
    return j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 230 |
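For context, this is the square-spiral prime-ratio problem (Project Euler 58); a quick check, assuming the names fixed above:

# With the default ratio of 0.1 this walks the spiral until fewer than 10% of
# the diagonal values are prime; the published answer to Project Euler 58 is a
# side length of 26241. Note this takes a while with trial-division is_prime.
print(solution(0.1))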
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    '''simple docstring'''
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 220 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __snake_case ( __lowerCAmelCase ):
a__ = ["""image_processor""", """tokenizer"""]
a__ = """AutoImageProcessor"""
a__ = """AutoTokenizer"""
def __init__( self , lowercase=None , lowercase=None , **lowercase) -> List[str]:
'''simple docstring'''
a__: str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase , )
a__: Optional[int] = kwargs.pop('feature_extractor')
a__: Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowercase , lowercase)
a__: Any = self.image_processor
a__: str = False
def __call__( self , *lowercase , **lowercase) -> str:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowercase , **lowercase)
a__: Any = kwargs.pop('images' , lowercase)
a__: Any = kwargs.pop('text' , lowercase)
if len(lowercase) > 0:
a__: int = args[0]
a__: Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.')
if images is not None:
a__: str = self.image_processor(lowercase , *lowercase , **lowercase)
if text is not None:
a__: List[str] = self.tokenizer(lowercase , **lowercase)
if text is None:
return inputs
elif images is None:
return encodings
else:
a__: Dict = encodings['input_ids']
return inputs
def lowerCamelCase_ ( self , *lowercase , **lowercase) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase)
def lowerCamelCase_ ( self , *lowercase , **lowercase) -> str:
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase)
@contextmanager
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.')
a__: Any = True
a__: Any = self.tokenizer
yield
a__: Optional[Any] = self.image_processor
a__: int = False
def lowerCamelCase_ ( self , lowercase , lowercase=False , lowercase=None) -> Union[str, Any]:
'''simple docstring'''
if added_vocab is None:
a__: List[Any] = self.tokenizer.get_added_vocab()
a__: List[str] = {}
while tokens:
a__: Any = re.search(r'<s_(.*?)>' , lowercase , re.IGNORECASE)
if start_token is None:
break
a__: List[str] = start_token.group(1)
a__: List[Any] = re.search(rf'</s_{key}>' , lowercase , re.IGNORECASE)
a__: Union[str, Any] = start_token.group()
if end_token is None:
a__: Dict = tokens.replace(lowercase , '')
else:
a__: Dict = end_token.group()
a__: int = re.escape(lowercase)
a__: List[str] = re.escape(lowercase)
a__: Any = re.search(f'{start_token_escaped}(.*?){end_token_escaped}' , lowercase , re.IGNORECASE)
if content is not None:
a__: Any = content.group(1).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
a__: List[str] = self.tokenajson(lowercase , is_inner_value=lowercase , added_vocab=lowercase)
if value:
if len(lowercase) == 1:
a__: Tuple = value[0]
a__: int = value
else: # leaf nodes
a__: Any = []
for leaf in content.split(r'<sep/>'):
a__: Tuple = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
a__: Tuple = leaf[1:-2] # for categorical special tokens
output[key].append(lowercase)
if len(output[key]) == 1:
a__: str = output[key][0]
a__: str = tokens[tokens.find(lowercase) + len(lowercase) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowercase , added_vocab=lowercase)
if len(lowercase):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase , )
return self.image_processor_class
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase , )
return self.image_processor
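The `tokenajson` method above mirrors Donut's token2json: it parses `<s_key>...</s_key>` spans into nested dicts, with `<sep/>` separating list items. A comment-only illustration follows; the sample tokens and output are constructed for illustration, not taken from the source:

# Illustrative shape of the conversion, assuming a processor instance whose
# tokenizer has the matching added vocabulary:
#
#   tokens = "<s_menu><s_name>Pizza</s_name><s_price>$10</s_price></s_menu>"
#   processor.tokenajson(tokens)
#   # -> {"menu": {"name": "Pizza", "price": "$10"}}
#
# Leaf values joined with <sep/> would come back as a list of strings instead.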
| 358 |
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))
def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = F'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(F'{error_message} it has nonnumerical characters.')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F'{error_message} of its length.')
        return False
    if not validate_initial_digits(credit_card_number):
        print(F'{error_message} of its first two digits.')
        return False
    if not luhn_validation(credit_card_number):
        print(F'{error_message} it fails the Luhn check.')
        return False
    print(F'{credit_card_number} is a valid credit card number.')
    return True
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    validate_credit_card_number('4111111111111111')
    validate_credit_card_number('32323')
| 203 | 0 |
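A small companion sketch built on the Luhn validator above; the helper function is hypothetical, not part of the source:

# Hypothetical helper: the check digit of a partial number is the unique digit
# that makes the completed number pass luhn_validation.
def luhn_check_digit(partial_number: str) -> int:
    for check_digit in range(10):
        if luhn_validation(partial_number + str(check_digit)):
            return check_digit
    raise AssertionError("unreachable: exactly one digit always validates")

print(luhn_check_digit('411111111111111'))  # 1, giving the valid 4111111111111111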
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """simple docstring"""
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
    main()
| 81 |
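Typical invocations of the CLI assembled above, one per registered subparser:

#   accelerate config           # interactive configuration
#   accelerate env              # print environment info
#   accelerate test             # sanity-check the saved config
#   accelerate launch train.py  # run a script with the configured setup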
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
| 345 | 0 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # n & (n - 1) clears the lowest set bit, so this loops once per set bit.
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"""Benchmark when {number = }:""")
        print(f"""{get_set_bits_count_using_modulo_operator(number) = }""")
        # Note: as in the original, timeit always measures the call with 25.
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"""timeit() runs in {timing} seconds""")
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup, )
        print(f"""timeit() runs in {timing} seconds""")
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 163 |
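A quick cross-check of the two popcount implementations above against Python's own bit counting:

for n in (0, 1, 25, 37, 58, 2**20 - 1):
    expected = bin(n).count("1")
    assert get_set_bits_count_using_brian_kernighans_algorithm(n) == expected
    assert get_set_bits_count_using_modulo_operator(n) == expected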
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 163 | 1 |
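A short check of the staircase counter above; it follows the Fibonacci recurrence f(n) = f(n - 1) + f(n - 2) for steps of size 1 or 2:

assert [climb_stairs(n) for n in range(1, 7)] == [1, 2, 3, 5, 8, 13]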
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a_ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(
lowerCamelCase__ , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if self.framework == "tf":
lowerCamelCase_ =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCamelCase_ =torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=lowerCAmelCase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_masked_index(lowerCAmelCase )
lowerCamelCase_ =np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''', self.model.base_model_prefix, f'''No mask_token ({self.tokenizer.mask_token}) found on the input''', )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if isinstance(lowerCAmelCase, lowerCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if return_tensors is None:
lowerCamelCase_ =self.framework
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase )
self.ensure_exactly_one_mask_token(lowerCAmelCase )
return model_inputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.model(**lowerCAmelCase )
lowerCamelCase_ =model_inputs['''input_ids''']
return model_outputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=5, lowerCAmelCase=None ):
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCamelCase_ =target_ids.shape[0]
lowerCamelCase_ =model_outputs['''input_ids'''][0]
lowerCamelCase_ =model_outputs['''logits''']
if self.framework == "tf":
lowerCamelCase_ =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCamelCase_ =outputs.numpy()
lowerCamelCase_ =outputs[0, masked_index, :]
lowerCamelCase_ =stable_softmax(lowerCAmelCase, axis=-1 )
if target_ids is not None:
lowerCamelCase_ =tf.gather_nd(tf.squeeze(lowerCAmelCase, 0 ), target_ids.reshape(-1, 1 ) )
lowerCamelCase_ =tf.expand_dims(lowerCAmelCase, 0 )
lowerCamelCase_ =tf.math.top_k(lowerCAmelCase, k=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =topk.values.numpy(), topk.indices.numpy()
else:
lowerCamelCase_ =torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=lowerCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCamelCase_ =outputs[0, masked_index, :]
lowerCamelCase_ =logits.softmax(dim=-1 )
if target_ids is not None:
lowerCamelCase_ =probs[..., target_ids]
lowerCamelCase_, lowerCamelCase_ =probs.topk(lowerCAmelCase )
lowerCamelCase_ =[]
lowerCamelCase_ =values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist() ) ):
lowerCamelCase_ =[]
for v, p in zip(_values, _predictions ):
# Copy is important since we're going to modify this array in place
lowerCamelCase_ =input_ids.numpy().copy()
if target_ids is not None:
lowerCamelCase_ =target_ids[p].tolist()
lowerCamelCase_ =p
# Filter padding out:
lowerCamelCase_ =tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase )
lowerCamelCase_ ={'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(lowerCAmelCase )
result.append(lowerCAmelCase )
if single_mask:
return result[0]
return result
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[targets]
try:
lowerCamelCase_ =self.tokenizer.get_vocab()
except Exception:
lowerCamelCase_ ={}
lowerCamelCase_ =[]
for target in targets:
lowerCamelCase_ =vocab.get(lowerCAmelCase, lowerCAmelCase )
if id_ is None:
lowerCamelCase_ =self.tokenizer(
lowerCAmelCase, add_special_tokens=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, max_length=1, truncation=lowerCAmelCase, )['''input_ids''']
if len(lowerCAmelCase ) == 0:
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCamelCase_ =input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
lowerCamelCase_ =list(set(lowerCAmelCase ) )
if len(lowerCAmelCase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCamelCase_ =np.array(lowerCAmelCase )
return target_ids
def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ ={}
if targets is not None:
lowerCamelCase_ =self.get_target_ids(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =target_ids
if top_k is not None:
lowerCamelCase_ =top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''', self.model.base_model_prefix, '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self, lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =super().__call__(lowerCAmelCase, **lowerCAmelCase )
if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) == 1:
return outputs[0]
return outputs
| 75 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = CTRLTokenizer
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Optional[Any] = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__ : str = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase__ : Tuple = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__ : Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[str]:
lowercase__ : List[str] = '''adapt react readapt apt'''
lowercase__ : Union[str, Any] = '''adapt react readapt apt'''
return input_text, output_text
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Optional[Any] = '''adapt react readapt apt'''
lowercase__ : Dict = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__ : Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : int = tokens + [tokenizer.unk_token]
lowercase__ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
| 198 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_UpperCAmelCase : Tuple = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('''Matrices are not 2x2''')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    print('''\n'''.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrixa: list, matrixb: list) -> list:
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            F"""Matrix A: {matrixa}\n"""
            F"""Matrix B: {matrixb}"""
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)
    # Quirk preserved from the original: two square inputs are returned as-is.
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maximum = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)
    final_matrix = actual_strassen(new_matrixa, new_matrixb)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
| 9 | 1 |
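A sanity check of the Strassen code above against a naive triple-loop product, using a non-square input so the square early-return quirk is not triggered; rows are copied first because strassen pads its inputs in place:

def naive_multiply(a, b):
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]

m1 = [[2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7]]
m2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
assert strassen([row[:] for row in m1], [row[:] for row in m2]) == naive_multiply(m1, m2)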
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    '''simple docstring'''
    # A color is valid for a vertex if no 1-marked neighbour already uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    '''simple docstring'''
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    '''simple docstring'''
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 80 |
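A short demo of the backtracking colorer above on a 5-vertex graph given as a 0/1 adjacency matrix:

graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(graph, 3))  # [0, 1, 0, 1, 0]; an empty list means no coloring exists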
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase__ = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase__ = logging.get_logger(__name__)
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Optional[int] = """maskformer"""
a_ : Optional[int] = {"""hidden_size""": """mask_feature_size"""}
a_ : Optional[int] = ["""resnet""", """swin"""]
a_ : int = ["""detr"""]
def __init__( self : str , a_ : int = 2_56 , a_ : int = 2_56 , a_ : float = 0.1 , a_ : bool = False , a_ : Optional[Dict] = None , a_ : Optional[Dict] = None , a_ : float = 0.02 , a_ : float = 1.0 , a_ : float = 1.0 , a_ : float = 1.0 , a_ : float = 20.0 , a_ : Optional[bool] = None , **a_ : str , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowerCAmelCase_ : Tuple = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(a_ , a_ ):
lowerCAmelCase_ : Optional[Any] = backbone_config.pop("model_type" )
lowerCAmelCase_ : Any = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ : str = config_class.from_dict(a_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowerCAmelCase_ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowerCAmelCase_ : Optional[Any] = (
decoder_config.pop("model_type" ) if isinstance(a_ , a_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {",".join(self.decoders_supported )}''' )
if isinstance(a_ , a_ ):
lowerCAmelCase_ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowerCAmelCase_ : List[Any] = config_class.from_dict(a_ )
lowerCAmelCase_ : str = backbone_config
lowerCAmelCase_ : Tuple = decoder_config
# main feature dimension for the model
lowerCAmelCase_ : str = fpn_feature_size
lowerCAmelCase_ : str = mask_feature_size
# initializer
lowerCAmelCase_ : List[Any] = init_std
lowerCAmelCase_ : Tuple = init_xavier_std
# Hungarian matcher && loss
lowerCAmelCase_ : int = cross_entropy_weight
lowerCAmelCase_ : Dict = dice_weight
lowerCAmelCase_ : int = mask_weight
lowerCAmelCase_ : Any = use_auxiliary_loss
lowerCAmelCase_ : Dict = no_object_weight
lowerCAmelCase_ : Optional[int] = output_auxiliary_logits
lowerCAmelCase_ : int = self.decoder_config.encoder_attention_heads
lowerCAmelCase_ : str = self.decoder_config.num_hidden_layers
super().__init__(**a_ )
@classmethod
def lowerCamelCase ( cls : int , a_ : PretrainedConfig , a_ : PretrainedConfig , **a_ : Tuple ):
return cls(
backbone_config=a_ , decoder_config=a_ , **a_ , )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Optional[int] = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[Any] = self.backbone_config.to_dict()
lowerCAmelCase_ : Union[str, Any] = self.decoder_config.to_dict()
lowerCAmelCase_ : List[str] = self.__class__.model_type
return output
| 241 | 0 |
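A hedged sketch for the composite config above; the name MaskFormerConfig is an assumption from the upstream class this appears to be, and the asserts follow the Swin/DETR fallbacks in `__init__`:

# Hypothetical usage: with no backbone/decoder configs given, the defaults
# fall back to Swin-base and DETR, per the __init__ logic above.
config = MaskFormerConfig()
assert config.backbone_config.model_type == "swin"
assert config.decoder_config.model_type == "detr"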
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase_ = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 253 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
lowerCamelCase_ = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
lowerCamelCase_ = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
lowerCamelCase_ = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ (datasets.Metric ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Optional[Any]=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
UpperCAmelCase_ : str = np.array([re.sub(lowerCAmelCase_ , "" , lowerCAmelCase_ ) for x in predictions] )
UpperCAmelCase_ : Dict = np.array([re.sub(lowerCAmelCase_ , "" , lowerCAmelCase_ ) for x in references] )
else:
UpperCAmelCase_ : int = np.asarray(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.asarray(lowerCAmelCase_ )
if ignore_case:
UpperCAmelCase_ : Optional[Any] = np.char.lower(lowerCAmelCase_ )
UpperCAmelCase_ : int = np.char.lower(lowerCAmelCase_ )
if ignore_punctuation:
UpperCAmelCase_ : Any = string.punctuation.maketrans("" , "" , string.punctuation )
UpperCAmelCase_ : Any = np.char.translate(lowerCAmelCase_ , table=lowerCAmelCase_ )
UpperCAmelCase_ : Any = np.char.translate(lowerCAmelCase_ , table=lowerCAmelCase_ )
if ignore_numbers:
UpperCAmelCase_ : Dict = string.digits.maketrans("" , "" , string.digits )
UpperCAmelCase_ : Optional[Any] = np.char.translate(lowerCAmelCase_ , table=lowerCAmelCase_ )
UpperCAmelCase_ : int = np.char.translate(lowerCAmelCase_ , table=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = predictions == references
return {"exact_match": np.mean(lowerCAmelCase_ ) * 100}
| 253 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ : Optional[int] = 16
lowerCAmelCase_ : Any = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 63 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case : Dict = 4
snake_case : str = (1 << p) - 1
for _ in range(p - 2 ):
snake_case : Any = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
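    # Extra spot checks (known results: 2**13 - 1 = 8191 is prime, while
    # 2**23 - 1 = 8388607 = 47 * 178481 is composite):
    print(lucas_lehmer_test(13))  # True
    print(lucas_lehmer_test(23))  # False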
| 203 | 0 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 356 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            # Default speaker embedding taken from the CMU Arctic x-vectors dataset.
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
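

# Hedged usage sketch (the tool is callable via the PipelineTool API, which
# chains encode -> forward -> decode; exact runtime setup may vary):
#
#     reader = TextToSpeechTool()
#     audio = reader("This is a test")  # 1-D waveform tensor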
| 20 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Sherman-Morrison formula, with `self` playing the role of A^(-1):
        # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
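
# Hedged usage sketch for `sherman_morrison` (illustrative only; `ainv` stands
# for a known inverse A^(-1), u and v are column vectors):
#
#     updated_inverse = ainv.sherman_morrison(u, v)   # == (A + u v^T)^(-1)
#
# The point of the formula is to get the inverse of a rank-1 update of A
# directly from A^(-1), without recomputing a full matrix inverse.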
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1): the 3x3 identity, so a is the identity as well
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 163 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 163 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
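
# Hedged invocation sketch (CSV paths and model name are assumptions):
#
#     python run_tf_text_classification.py \
#         --model_name_or_path bert-base-cased \
#         --train_file train.csv \
#         --dev_file dev.csv \
#         --label_column_id 0 \
#         --output_dir ./out \
#         --do_train \
#         --do_eval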
| 371 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """
    A pure-Python vector backed by a list of float components.
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):  # scalar multiplication
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):  # dot product
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        # precondition
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    # precondition
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # precondition
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # precondition
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """
    A simple w x h matrix of floats, stored as a list of row lists.
    """

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
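

# Hedged quick demo of the classes above (illustrative values only, not part
# of the original module):
#
#     x = Vector([1, 2, 3])
#     y = Vector([3, 2, 1])
#     print(x + y)              # (4,4,4)
#     print(x * y)              # dot product: 10
#     m = Matrix([[1, 0], [0, 1]], 2, 2)
#     print(m.determinant())    # 1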
| 121 | 0 |